drivers/memory/omap-gpmc.c
1 /*
2  * GPMC support functions
3  *
4  * Copyright (C) 2005-2006 Nokia Corporation
5  *
6  * Author: Juha Yrjola
7  *
8  * Copyright (C) 2009 Texas Instruments
9  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  */
15 #include <linux/irq.h>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/err.h>
19 #include <linux/clk.h>
20 #include <linux/ioport.h>
21 #include <linux/spinlock.h>
22 #include <linux/io.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/platform_device.h>
27 #include <linux/of.h>
28 #include <linux/of_address.h>
29 #include <linux/of_mtd.h>
30 #include <linux/of_device.h>
31 #include <linux/of_platform.h>
32 #include <linux/omap-gpmc.h>
33 #include <linux/pm_runtime.h>
34
35 #include <linux/platform_data/mtd-nand-omap2.h>
36 #include <linux/platform_data/mtd-onenand-omap2.h>
37
38 #include <asm/mach-types.h>
39
40 #define DEVICE_NAME             "omap-gpmc"
41
42 /* GPMC register offsets */
43 #define GPMC_REVISION           0x00
44 #define GPMC_SYSCONFIG          0x10
45 #define GPMC_SYSSTATUS          0x14
46 #define GPMC_IRQSTATUS          0x18
47 #define GPMC_IRQENABLE          0x1c
48 #define GPMC_TIMEOUT_CONTROL    0x40
49 #define GPMC_ERR_ADDRESS        0x44
50 #define GPMC_ERR_TYPE           0x48
51 #define GPMC_CONFIG             0x50
52 #define GPMC_STATUS             0x54
53 #define GPMC_PREFETCH_CONFIG1   0x1e0
54 #define GPMC_PREFETCH_CONFIG2   0x1e4
55 #define GPMC_PREFETCH_CONTROL   0x1ec
56 #define GPMC_PREFETCH_STATUS    0x1f0
57 #define GPMC_ECC_CONFIG         0x1f4
58 #define GPMC_ECC_CONTROL        0x1f8
59 #define GPMC_ECC_SIZE_CONFIG    0x1fc
60 #define GPMC_ECC1_RESULT        0x200
61 #define GPMC_ECC_BCH_RESULT_0   0x240   /* not available on OMAP2 */
62 #define GPMC_ECC_BCH_RESULT_1   0x244   /* not available on OMAP2 */
63 #define GPMC_ECC_BCH_RESULT_2   0x248   /* not available on OMAP2 */
64 #define GPMC_ECC_BCH_RESULT_3   0x24c   /* not available on OMAP2 */
65 #define GPMC_ECC_BCH_RESULT_4   0x300   /* not available on OMAP2 */
66 #define GPMC_ECC_BCH_RESULT_5   0x304   /* not available on OMAP2 */
67 #define GPMC_ECC_BCH_RESULT_6   0x308   /* not available on OMAP2 */
68
69 /* GPMC ECC control settings */
70 #define GPMC_ECC_CTRL_ECCCLEAR          0x100
71 #define GPMC_ECC_CTRL_ECCDISABLE        0x000
72 #define GPMC_ECC_CTRL_ECCREG1           0x001
73 #define GPMC_ECC_CTRL_ECCREG2           0x002
74 #define GPMC_ECC_CTRL_ECCREG3           0x003
75 #define GPMC_ECC_CTRL_ECCREG4           0x004
76 #define GPMC_ECC_CTRL_ECCREG5           0x005
77 #define GPMC_ECC_CTRL_ECCREG6           0x006
78 #define GPMC_ECC_CTRL_ECCREG7           0x007
79 #define GPMC_ECC_CTRL_ECCREG8           0x008
80 #define GPMC_ECC_CTRL_ECCREG9           0x009
81
82 #define GPMC_CONFIG_LIMITEDADDRESS              BIT(1)
83
84 #define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS      BIT(0)
85
86 #define GPMC_CONFIG2_CSEXTRADELAY               BIT(7)
87 #define GPMC_CONFIG3_ADVEXTRADELAY              BIT(7)
88 #define GPMC_CONFIG4_OEEXTRADELAY               BIT(7)
89 #define GPMC_CONFIG4_WEEXTRADELAY               BIT(23)
90 #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN        BIT(6)
91 #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN        BIT(7)
92
93 #define GPMC_CS0_OFFSET         0x60
94 #define GPMC_CS_SIZE            0x30
95 #define GPMC_BCH_SIZE           0x10
96
97 #define GPMC_MEM_END            0x3FFFFFFF
98
99 #define GPMC_CHUNK_SHIFT        24              /* 16 MB */
100 #define GPMC_SECTION_SHIFT      28              /* 128 MB */
101
102 #define CS_NUM_SHIFT            24
103 #define ENABLE_PREFETCH         (0x1 << 7)
104 #define DMA_MPU_MODE            2
105
106 #define GPMC_REVISION_MAJOR(l)          ((l >> 4) & 0xf)
107 #define GPMC_REVISION_MINOR(l)          (l & 0xf)
108
109 #define GPMC_HAS_WR_ACCESS              0x1
110 #define GPMC_HAS_WR_DATA_MUX_BUS        0x2
111 #define GPMC_HAS_MUX_AAD                0x4
112
113 #define GPMC_NR_WAITPINS                4
114
115 #define GPMC_CS_CONFIG1         0x00
116 #define GPMC_CS_CONFIG2         0x04
117 #define GPMC_CS_CONFIG3         0x08
118 #define GPMC_CS_CONFIG4         0x0c
119 #define GPMC_CS_CONFIG5         0x10
120 #define GPMC_CS_CONFIG6         0x14
121 #define GPMC_CS_CONFIG7         0x18
122 #define GPMC_CS_NAND_COMMAND    0x1c
123 #define GPMC_CS_NAND_ADDRESS    0x20
124 #define GPMC_CS_NAND_DATA       0x24
125
126 /* Control Commands */
127 #define GPMC_CONFIG_RDY_BSY     0x00000001
128 #define GPMC_CONFIG_DEV_SIZE    0x00000002
129 #define GPMC_CONFIG_DEV_TYPE    0x00000003
130
131 #define GPMC_CONFIG1_WRAPBURST_SUPP     (1 << 31)
132 #define GPMC_CONFIG1_READMULTIPLE_SUPP  (1 << 30)
133 #define GPMC_CONFIG1_READTYPE_ASYNC     (0 << 29)
134 #define GPMC_CONFIG1_READTYPE_SYNC      (1 << 29)
135 #define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
136 #define GPMC_CONFIG1_WRITETYPE_ASYNC    (0 << 27)
137 #define GPMC_CONFIG1_WRITETYPE_SYNC     (1 << 27)
138 #define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
139 /** CLKACTIVATIONTIME Max Ticks */
140 #define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
141 #define GPMC_CONFIG1_PAGE_LEN(val)      ((val & 3) << 23)
142 /** ATTACHEDDEVICEPAGELENGTH Max Value */
143 #define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
144 #define GPMC_CONFIG1_WAIT_READ_MON      (1 << 22)
145 #define GPMC_CONFIG1_WAIT_WRITE_MON     (1 << 21)
146 #define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
147 /** WAITMONITORINGTIME Max Ticks */
148 #define GPMC_CONFIG1_WAITMONITORINGTIME_MAX  2
149 #define GPMC_CONFIG1_WAIT_PIN_SEL(val)  ((val & 3) << 16)
150 #define GPMC_CONFIG1_DEVICESIZE(val)    ((val & 3) << 12)
151 #define GPMC_CONFIG1_DEVICESIZE_16      GPMC_CONFIG1_DEVICESIZE(1)
152 /** DEVICESIZE Max Value */
153 #define GPMC_CONFIG1_DEVICESIZE_MAX     1
154 #define GPMC_CONFIG1_DEVICETYPE(val)    ((val & 3) << 10)
155 #define GPMC_CONFIG1_DEVICETYPE_NOR     GPMC_CONFIG1_DEVICETYPE(0)
156 #define GPMC_CONFIG1_MUXTYPE(val)       ((val & 3) << 8)
157 #define GPMC_CONFIG1_TIME_PARA_GRAN     (1 << 4)
158 #define GPMC_CONFIG1_FCLK_DIV(val)      (val & 3)
159 #define GPMC_CONFIG1_FCLK_DIV2          (GPMC_CONFIG1_FCLK_DIV(1))
160 #define GPMC_CONFIG1_FCLK_DIV3          (GPMC_CONFIG1_FCLK_DIV(2))
161 #define GPMC_CONFIG1_FCLK_DIV4          (GPMC_CONFIG1_FCLK_DIV(3))
162 #define GPMC_CONFIG7_CSVALID            (1 << 6)
163
164 #define GPMC_CONFIG7_BASEADDRESS_MASK   0x3f
165 #define GPMC_CONFIG7_CSVALID_MASK       BIT(6)
166 #define GPMC_CONFIG7_MASKADDRESS_OFFSET 8
167 #define GPMC_CONFIG7_MASKADDRESS_MASK   (0xf << GPMC_CONFIG7_MASKADDRESS_OFFSET)
168 /* All CONFIG7 bits except reserved bits */
169 #define GPMC_CONFIG7_MASK               (GPMC_CONFIG7_BASEADDRESS_MASK | \
170                                          GPMC_CONFIG7_CSVALID_MASK |     \
171                                          GPMC_CONFIG7_MASKADDRESS_MASK)
172
173 #define GPMC_DEVICETYPE_NOR             0
174 #define GPMC_DEVICETYPE_NAND            2
175 #define GPMC_CONFIG_WRITEPROTECT        0x00000010
176 #define WR_RD_PIN_MONITORING            0x00600000
177
178 /* ECC commands */
179 #define GPMC_ECC_READ           0 /* Reset Hardware ECC for read */
180 #define GPMC_ECC_WRITE          1 /* Reset Hardware ECC for write */
181 #define GPMC_ECC_READSYN        2 /* Reset before syndrome is read back */
182
183 /* XXX: Only the NAND IRQs have been considered; currently these are the
184  * only ones used. */
185 #define GPMC_NR_IRQ             2
186
187 enum gpmc_clk_domain {
188         GPMC_CD_FCLK,
189         GPMC_CD_CLK
190 };
191
192 struct gpmc_cs_data {
193         const char *name;
194
195 #define GPMC_CS_RESERVED        (1 << 0)
196         u32 flags;
197
198         struct resource mem;
199 };
200
201 /* Structure to save gpmc cs context */
202 struct gpmc_cs_config {
203         u32 config1;
204         u32 config2;
205         u32 config3;
206         u32 config4;
207         u32 config5;
208         u32 config6;
209         u32 config7;
210         int is_valid;
211 };
212
213 /*
214  * Structure to save/restore gpmc context
215  * to support core off on OMAP3
216  */
217 struct omap3_gpmc_regs {
218         u32 sysconfig;
219         u32 irqenable;
220         u32 timeout_ctrl;
221         u32 config;
222         u32 prefetch_config1;
223         u32 prefetch_config2;
224         u32 prefetch_control;
225         struct gpmc_cs_config cs_context[GPMC_CS_NUM];
226 };
227
228 struct gpmc_device {
229         struct device *dev;
230         int irq;
231         struct irq_chip irq_chip;
232 };
233
234 static struct irq_domain *gpmc_irq_domain;
235
236 static struct resource  gpmc_mem_root;
237 static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
238 static DEFINE_SPINLOCK(gpmc_mem_lock);
239 /* Define chip-selects as reserved by default until probe completes */
240 static unsigned int gpmc_cs_num = GPMC_CS_NUM;
241 static unsigned int gpmc_nr_waitpins;
242 static resource_size_t phys_base, mem_size;
243 static unsigned gpmc_capability;
244 static void __iomem *gpmc_base;
245
246 static struct clk *gpmc_l3_clk;
247
248 static irqreturn_t gpmc_handle_irq(int irq, void *dev);
249
250 static void gpmc_write_reg(int idx, u32 val)
251 {
252         writel_relaxed(val, gpmc_base + idx);
253 }
254
255 static u32 gpmc_read_reg(int idx)
256 {
257         return readl_relaxed(gpmc_base + idx);
258 }
259
260 void gpmc_cs_write_reg(int cs, int idx, u32 val)
261 {
262         void __iomem *reg_addr;
263
264         reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
265         writel_relaxed(val, reg_addr);
266 }
267
268 static u32 gpmc_cs_read_reg(int cs, int idx)
269 {
270         void __iomem *reg_addr;
271
272         reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
273         return readl_relaxed(reg_addr);
274 }
275
276 /* TODO: Add support for gpmc_fck to clock framework and use it */
277 static unsigned long gpmc_get_fclk_period(void)
278 {
279         unsigned long rate = clk_get_rate(gpmc_l3_clk);
280
281         rate /= 1000;
282         rate = 1000000000 / rate;       /* In picoseconds */
283
284         return rate;
285 }
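
/*
 * Worked example (editorial note, assuming a 100 MHz L3 clock): clk_get_rate()
 * returns 100000000, 100000000 / 1000 = 100000, and 1000000000 / 100000 =
 * 10000, i.e. a GPMC_FCLK period of 10000 ps (10 ns).
 */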
286
287 /**
288  * gpmc_get_clk_period - get period of selected clock domain in ps
289  * @cs: Chip Select Region.
290  * @cd: Clock Domain.
291  *
292  * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
293  * prior to calling this function with GPMC_CD_CLK.
294  */
295 static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
296 {
297
298         unsigned long tick_ps = gpmc_get_fclk_period();
299         u32 l;
300         int div;
301
302         switch (cd) {
303         case GPMC_CD_CLK:
304                 /* get current clk divider */
305                 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
306                 div = (l & 0x03) + 1;
307                 /* get GPMC_CLK period */
308                 tick_ps *= div;
309                 break;
310         case GPMC_CD_FCLK:
311                 /* FALL-THROUGH */
312         default:
313                 break;
314         }
315
316         return tick_ps;
317
318 }
319
320 static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
321                                          enum gpmc_clk_domain cd)
322 {
323         unsigned long tick_ps;
324
325         /* Calculate in picosecs to yield more exact results */
326         tick_ps = gpmc_get_clk_period(cs, cd);
327
328         return (time_ns * 1000 + tick_ps - 1) / tick_ps;
329 }
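
/*
 * Example (editorial): with a 10 ns (10000 ps) tick, a request of 45 ns gives
 * (45 * 1000 + 10000 - 1) / 10000 = 5 ticks; the conversion always rounds up,
 * so the programmed time is never shorter than the requested one.
 */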
330
331 static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
332 {
333         return gpmc_ns_to_clk_ticks(time_ns, /* any CS */ 0, GPMC_CD_FCLK);
334 }
335
336 static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
337 {
338         unsigned long tick_ps;
339
340         /* Calculate in picosecs to yield more exact results */
341         tick_ps = gpmc_get_fclk_period();
342
343         return (time_ps + tick_ps - 1) / tick_ps;
344 }
345
346 unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
347                                   enum gpmc_clk_domain cd)
348 {
349         return ticks * gpmc_get_clk_period(cs, cd) / 1000;
350 }
351
352 unsigned int gpmc_ticks_to_ns(unsigned int ticks)
353 {
354         return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
355 }
356
357 static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
358 {
359         return ticks * gpmc_get_fclk_period();
360 }
361
362 static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
363 {
364         unsigned long ticks = gpmc_ps_to_ticks(time_ps);
365
366         return ticks * gpmc_get_fclk_period();
367 }
368
369 static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
370 {
371         u32 l;
372
373         l = gpmc_cs_read_reg(cs, reg);
374         if (value)
375                 l |= mask;
376         else
377                 l &= ~mask;
378         gpmc_cs_write_reg(cs, reg, l);
379 }
380
381 static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
382 {
383         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
384                            GPMC_CONFIG1_TIME_PARA_GRAN,
385                            p->time_para_granularity);
386         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
387                            GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
388         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
389                            GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
390         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
391                            GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
392         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
393                            GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
394         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
395                            GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
396                            p->cycle2cyclesamecsen);
397         gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
398                            GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
399                            p->cycle2cyclediffcsen);
400 }
401
402 #ifdef CONFIG_OMAP_GPMC_DEBUG
403 /**
404  * get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
405  * @cs:      Chip Select Region
406  * @reg:     GPMC_CS_CONFIGn register offset.
407  * @st_bit:  Start Bit
408  * @end_bit: End Bit. Must be >= @st_bit.
409  * @max:     Maximum parameter value (before optional @shift).
410  *           If 0, maximum is as high as @st_bit and @end_bit allow.
411  * @name:    DTS node name, w/o "gpmc,"
412  * @cd:      Clock Domain of timing parameter.
413  * @shift:   If not 0, print the value of (@shift << parameter) instead of the raw parameter value.
414  * @raw:     Raw Format Option.
415  *           raw format:  gpmc,name = <value>
416  *           tick format: gpmc,name = <value> /&zwj;* x ns -- y ns; x ticks *&zwj;/
417  *           Where x ns -- y ns result in the same tick value.
418  *           When @max is exceeded, "invalid" is printed inside comment.
419  * @noval:   Parameter values equal to 0 are not printed.
420  * @return:  Specified timing parameter (after optional @shift).
421  *
422  */
423 static int get_gpmc_timing_reg(
424         /* timing specifiers */
425         int cs, int reg, int st_bit, int end_bit, int max,
426         const char *name, const enum gpmc_clk_domain cd,
427         /* value transform */
428         int shift,
429         /* format specifiers */
430         bool raw, bool noval)
431 {
432         u32 l;
433         int nr_bits;
434         int mask;
435         bool invalid;
436
437         l = gpmc_cs_read_reg(cs, reg);
438         nr_bits = end_bit - st_bit + 1;
439         mask = (1 << nr_bits) - 1;
440         l = (l >> st_bit) & mask;
441         if (!max)
442                 max = mask;
443         invalid = l > max;
444         if (shift)
445                 l = (shift << l);
446         if (noval && (l == 0))
447                 return 0;
448         if (!raw) {
449                 /* DTS tick format for timings in ns */
450                 unsigned int time_ns;
451                 unsigned int time_ns_min = 0;
452
453                 if (l)
454                         time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
455                 time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
456                 pr_info("gpmc,%s = <%u> /* %u ns - %u ns; %i ticks%s*/\n",
457                         name, time_ns, time_ns_min, time_ns, l,
458                         invalid ? "; invalid " : " ");
459         } else {
460                 /* raw format */
461                 pr_info("gpmc,%s = <%u>%s\n", name, l,
462                         invalid ? " /* invalid */" : "");
463         }
464
465         return l;
466 }
467
468 #define GPMC_PRINT_CONFIG(cs, config) \
469         pr_info("cs%i %s: 0x%08x\n", cs, #config, \
470                 gpmc_cs_read_reg(cs, config))
471 #define GPMC_GET_RAW(reg, st, end, field) \
472         get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 0)
473 #define GPMC_GET_RAW_MAX(reg, st, end, max, field) \
474         get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, 0, 1, 0)
475 #define GPMC_GET_RAW_BOOL(reg, st, end, field) \
476         get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 1)
477 #define GPMC_GET_RAW_SHIFT_MAX(reg, st, end, shift, max, field) \
478         get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, (shift), 1, 1)
479 #define GPMC_GET_TICKS(reg, st, end, field) \
480         get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 0, 0)
481 #define GPMC_GET_TICKS_CD(reg, st, end, field, cd) \
482         get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, (cd), 0, 0, 0)
483 #define GPMC_GET_TICKS_CD_MAX(reg, st, end, max, field, cd) \
484         get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, (cd), 0, 0, 0)
485
486 static void gpmc_show_regs(int cs, const char *desc)
487 {
488         pr_info("gpmc cs%i %s:\n", cs, desc);
489         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
490         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
491         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
492         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
493         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
494         GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
495 }
496
497 /*
498  * Note that gpmc,wait-pin handling wrongly assumes bit 8 is available,
499  * see commit c9fb809.
500  */
501 static void gpmc_cs_show_timings(int cs, const char *desc)
502 {
503         gpmc_show_regs(cs, desc);
504
505         pr_info("gpmc cs%i access configuration:\n", cs);
506         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1,  4,  4, "time-para-granularity");
507         GPMC_GET_RAW(GPMC_CS_CONFIG1,  8,  9, "mux-add-data");
508         GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13,
509                          GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
510         GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
511         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
512         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
513         GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 23, 24, 4,
514                                GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX,
515                                "burst-length");
516         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
517         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
518         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "sync-read");
519         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
520         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");
521
522         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2,  7,  7, "cs-extra-delay");
523
524         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3,  7,  7, "adv-extra-delay");
525
526         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
527         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4,  7,  7, "oe-extra-delay");
528
529         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6,  7,  7, "cycle2cycle-samecsen");
530         GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6,  6,  6, "cycle2cycle-diffcsen");
531
532         pr_info("gpmc cs%i timings configuration:\n", cs);
533         GPMC_GET_TICKS(GPMC_CS_CONFIG2,  0,  3, "cs-on-ns");
534         GPMC_GET_TICKS(GPMC_CS_CONFIG2,  8, 12, "cs-rd-off-ns");
535         GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");
536
537         GPMC_GET_TICKS(GPMC_CS_CONFIG3,  0,  3, "adv-on-ns");
538         GPMC_GET_TICKS(GPMC_CS_CONFIG3,  8, 12, "adv-rd-off-ns");
539         GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
540         if (gpmc_capability & GPMC_HAS_MUX_AAD) {
541                 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 4, 6, "adv-aad-mux-on-ns");
542                 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 24, 26,
543                                 "adv-aad-mux-rd-off-ns");
544                 GPMC_GET_TICKS(GPMC_CS_CONFIG3, 28, 30,
545                                 "adv-aad-mux-wr-off-ns");
546         }
547
548         GPMC_GET_TICKS(GPMC_CS_CONFIG4,  0,  3, "oe-on-ns");
549         GPMC_GET_TICKS(GPMC_CS_CONFIG4,  8, 12, "oe-off-ns");
550         if (gpmc_capability & GPMC_HAS_MUX_AAD) {
551                 GPMC_GET_TICKS(GPMC_CS_CONFIG4,  4,  6, "oe-aad-mux-on-ns");
552                 GPMC_GET_TICKS(GPMC_CS_CONFIG4, 13, 15, "oe-aad-mux-off-ns");
553         }
554         GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
555         GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
556
557         GPMC_GET_TICKS(GPMC_CS_CONFIG5,  0,  4, "rd-cycle-ns");
558         GPMC_GET_TICKS(GPMC_CS_CONFIG5,  8, 12, "wr-cycle-ns");
559         GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");
560
561         GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");
562
563         GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
564         GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
565
566         GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
567                               GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
568                               "wait-monitoring-ns", GPMC_CD_CLK);
569         GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
570                               GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
571                               "clk-activation-ns", GPMC_CD_FCLK);
572
573         GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
574         GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
575 }
576 #else
577 static inline void gpmc_cs_show_timings(int cs, const char *desc)
578 {
579 }
580 #endif
581
582 /**
583  * set_gpmc_timing_reg - set a single timing parameter for Chip Select Region.
584  * Caller is expected to have initialized CONFIG1 GPMCFCLKDIVIDER
585  * prior to calling this function with @cd equal to GPMC_CD_CLK.
586  *
587  * @cs:      Chip Select Region.
588  * @reg:     GPMC_CS_CONFIGn register offset.
589  * @st_bit:  Start Bit
590  * @end_bit: End Bit. Must be >= @st_bit.
591  * @max:     Maximum parameter value.
592  *           If 0, maximum is as high as @st_bit and @end_bit allow.
593  * @time:    Timing parameter in ns.
594  * @cd:      Timing parameter clock domain.
595  * @name:    Timing parameter name.
596  * @return:  0 on success, -1 on error.
597  */
598 static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max,
599                                int time, enum gpmc_clk_domain cd, const char *name)
600 {
601         u32 l;
602         int ticks, mask, nr_bits;
603
604         if (time == 0)
605                 ticks = 0;
606         else
607                 ticks = gpmc_ns_to_clk_ticks(time, cs, cd);
608         nr_bits = end_bit - st_bit + 1;
609         mask = (1 << nr_bits) - 1;
610
611         if (!max)
612                 max = mask;
613
614         if (ticks > max) {
615                 pr_err("%s: GPMC CS%d: %s %d ns, %d ticks > %d ticks\n",
616                        __func__, cs, name, time, ticks, max);
617
618                 return -1;
619         }
620
621         l = gpmc_cs_read_reg(cs, reg);
622 #ifdef CONFIG_OMAP_GPMC_DEBUG
623         pr_info(
624                 "GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
625                cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
626                         (l >> st_bit) & mask, time);
627 #endif
628         l &= ~(mask << st_bit);
629         l |= ticks << st_bit;
630         gpmc_cs_write_reg(cs, reg, l);
631
632         return 0;
633 }
634
635 #define GPMC_SET_ONE_CD_MAX(reg, st, end, max, field, cd)  \
636         if (set_gpmc_timing_reg(cs, (reg), (st), (end), (max), \
637             t->field, (cd), #field) < 0)                       \
638                 return -1
639
640 #define GPMC_SET_ONE(reg, st, end, field) \
641         GPMC_SET_ONE_CD_MAX(reg, st, end, 0, field, GPMC_CD_FCLK)
642
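/*
 * Editorial example of how the helper macros above are used: inside
 * gpmc_cs_set_timings() below,
 *
 *	GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
 *
 * converts t->cs_on from ns to GPMC_FCLK ticks and writes the result into
 * bits 0..3 of GPMC_CS_CONFIG2 of the current chip select, making the
 * enclosing function return -1 if the value does not fit.
 */
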
643 /**
644  * gpmc_calc_waitmonitoring_divider - calculate proper GPMCFCLKDIVIDER based on WAITMONITORINGTIME
645  * WAITMONITORINGTIME will be _at least_ as long as desired, i.e.
646  * read  --> don't sample bus too early
647  * write --> data is longer on bus
648  *
649  * Formula:
650  * gpmc_clk_div + 1 = ceil(ceil(waitmonitoringtime_ns / gpmc_fclk_ns)
651  *                    / waitmonitoring_ticks)
652  * WAITMONITORINGTIME resulting in 0 or 1 tick with div = 1 are caught by
653  * div <= 0 check.
654  *
655  * @wait_monitoring: WAITMONITORINGTIME in ns.
656  * @return:          -1 on failure to scale, else proper divider > 0.
657  */
658 static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
659 {
660
661         int div = gpmc_ns_to_ticks(wait_monitoring);
662
663         div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
664         div /= GPMC_CONFIG1_WAITMONITORINGTIME_MAX;
665
666         if (div > 4)
667                 return -1;
668         if (div <= 0)
669                 div = 1;
670
671         return div;
672
673 }
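
/*
 * Worked example (editorial, assuming a 10 ns GPMC_FCLK): a 55 ns
 * WAITMONITORINGTIME is 6 FCLK ticks, so div = (6 + 2 - 1) / 2 = 3; with
 * GPMCFCLKDIVIDER set to divide-by-3, the 2-tick WAITMONITORINGTIME field
 * covers 2 * 30 ns = 60 ns >= 55 ns.
 */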
674
675 /**
676  * gpmc_calc_divider - calculate GPMC_FCLK divider for sync_clk GPMC_CLK period.
677  * @sync_clk: GPMC_CLK period in ps.
678  * @return:   Returns at least 1 if GPMC_FCLK can be divided to GPMC_CLK.
679  *            Else, returns -1.
680  */
681 int gpmc_calc_divider(unsigned int sync_clk)
682 {
683         int div = gpmc_ps_to_ticks(sync_clk);
684
685         if (div > 4)
686                 return -1;
687         if (div <= 0)
688                 div = 1;
689
690         return div;
691 }
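
/*
 * Example (editorial, 10 ns GPMC_FCLK assumed): sync_clk = 25000 ps rounds up
 * to 3 FCLK ticks, so the divider returned is 3 and the resulting GPMC_CLK
 * period is 30 ns; sync_clk = 0 yields the minimum divider of 1.
 */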
692
693 /**
694  * gpmc_cs_set_timings - program timing parameters for Chip Select Region.
695  * @cs:     Chip Select Region.
696  * @t:      GPMC timing parameters.
697  * @s:      GPMC timing settings.
698  * @return: 0 on success, -1 on error.
699  */
700 int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
701                         const struct gpmc_settings *s)
702 {
703         int div;
704         u32 l;
705
706         div = gpmc_calc_divider(t->sync_clk);
707         if (div < 0)
708                 return div;
709
710         /*
711          * See if we need to change the divider for waitmonitoringtime.
712          *
713          * Calculate GPMCFCLKDIVIDER independent of gpmc,sync-clk-ps in DT for
714          * pure asynchronous accesses, i.e. both read and write asynchronous.
715          * However, only do so if WAITMONITORINGTIME is actually used, i.e.
716          * either WAITREADMONITORING or WAITWRITEMONITORING is set.
717          *
718          * This statement must not change div to scale async WAITMONITORINGTIME
719          * to protect mixed synchronous and asynchronous accesses.
720          *
721          * We raise an error later if WAITMONITORINGTIME does not fit.
722          */
723         if (!s->sync_read && !s->sync_write &&
724             (s->wait_on_read || s->wait_on_write)
725            ) {
726
727                 div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
728                 if (div < 0) {
729                         pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
730                                __func__,
731                                t->wait_monitoring
732                                );
733                         return -1;
734                 }
735         }
736
737         GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
738         GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
739         GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
740
741         GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
742         GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
743         GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
744         if (gpmc_capability & GPMC_HAS_MUX_AAD) {
745                 GPMC_SET_ONE(GPMC_CS_CONFIG3,  4,  6, adv_aad_mux_on);
746                 GPMC_SET_ONE(GPMC_CS_CONFIG3, 24, 26, adv_aad_mux_rd_off);
747                 GPMC_SET_ONE(GPMC_CS_CONFIG3, 28, 30, adv_aad_mux_wr_off);
748         }
749
750         GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
751         GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
752         if (gpmc_capability & GPMC_HAS_MUX_AAD) {
753                 GPMC_SET_ONE(GPMC_CS_CONFIG4,  4,  6, oe_aad_mux_on);
754                 GPMC_SET_ONE(GPMC_CS_CONFIG4, 13, 15, oe_aad_mux_off);
755         }
756         GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
757         GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
758
759         GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
760         GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
761         GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
762
763         GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
764
765         GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
766         GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
767
768         if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
769                 GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
770         if (gpmc_capability & GPMC_HAS_WR_ACCESS)
771                 GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
772
773         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
774         l &= ~0x03;
775         l |= (div - 1);
776         gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
777
778         GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
779                             GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
780                             wait_monitoring, GPMC_CD_CLK);
781         GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
782                             GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
783                             clk_activation, GPMC_CD_FCLK);
784
785 #ifdef CONFIG_OMAP_GPMC_DEBUG
786         pr_info("GPMC CS%d CLK period is %lu ns (div %d)\n",
787                         cs, (div * gpmc_get_fclk_period()) / 1000, div);
788 #endif
789
790         gpmc_cs_bool_timings(cs, &t->bool_timings);
791         gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
792
793         return 0;
794 }
795
796 static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
797 {
798         u32 l;
799         u32 mask;
800
801         /*
802          * Ensure that base address is aligned on a
803          * boundary equal to or greater than size.
804          */
805         if (base & (size - 1))
806                 return -EINVAL;
807
808         base >>= GPMC_CHUNK_SHIFT;
809         mask = (1 << GPMC_SECTION_SHIFT) - size;
810         mask >>= GPMC_CHUNK_SHIFT;
811         mask <<= GPMC_CONFIG7_MASKADDRESS_OFFSET;
812
813         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
814         l &= ~GPMC_CONFIG7_MASK;
815         l |= base & GPMC_CONFIG7_BASEADDRESS_MASK;
816         l |= mask & GPMC_CONFIG7_MASKADDRESS_MASK;
817         l |= GPMC_CONFIG7_CSVALID;
818         gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
819
820         return 0;
821 }
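
/*
 * Worked example (editorial): mapping a 64 MB (0x04000000) region at GPMC
 * address 0x08000000. base >> GPMC_CHUNK_SHIFT gives BASEADDRESS = 0x08 and
 * ((1 << GPMC_SECTION_SHIFT) - 0x04000000) >> GPMC_CHUNK_SHIFT = 0x0c, so
 * CONFIG7 is written with BASEADDRESS = 0x08, MASKADDRESS = 0xc and CSVALID
 * set.
 */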
822
823 static void gpmc_cs_enable_mem(int cs)
824 {
825         u32 l;
826
827         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
828         l |= GPMC_CONFIG7_CSVALID;
829         gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
830 }
831
832 static void gpmc_cs_disable_mem(int cs)
833 {
834         u32 l;
835
836         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
837         l &= ~GPMC_CONFIG7_CSVALID;
838         gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
839 }
840
841 static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
842 {
843         u32 l;
844         u32 mask;
845
846         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
847         *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
848         mask = (l >> 8) & 0x0f;
849         *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
850 }
851
852 static int gpmc_cs_mem_enabled(int cs)
853 {
854         u32 l;
855
856         l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
857         return l & GPMC_CONFIG7_CSVALID;
858 }
859
860 static void gpmc_cs_set_reserved(int cs, int reserved)
861 {
862         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
863
864         if (reserved)
                    gpmc->flags |= GPMC_CS_RESERVED;
            else
                    gpmc->flags &= ~GPMC_CS_RESERVED;
865 }
866
867 static bool gpmc_cs_reserved(int cs)
868 {
869         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
870
871         return gpmc->flags & GPMC_CS_RESERVED;
872 }
873
874 static void gpmc_cs_set_name(int cs, const char *name)
875 {
876         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
877
878         gpmc->name = name;
879 }
880
881 static const char *gpmc_cs_get_name(int cs)
882 {
883         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
884
885         return gpmc->name;
886 }
887
888 static unsigned long gpmc_mem_align(unsigned long size)
889 {
890         int order;
891
892         size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
893         order = GPMC_CHUNK_SHIFT - 1;
894         do {
895                 size >>= 1;
896                 order++;
897         } while (size);
898         size = 1 << order;
899         return size;
900 }
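
/*
 * Example (editorial): gpmc_mem_align() rounds a request up to the GPMC
 * mapping granularity, e.g. anything up to 16 MB becomes 16 MB and a 20 MB
 * request becomes 32 MB (the next power of two).
 */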
901
902 static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
903 {
904         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
905         struct resource *res = &gpmc->mem;
906         int r;
907
908         size = gpmc_mem_align(size);
909         spin_lock(&gpmc_mem_lock);
910         res->start = base;
911         res->end = base + size - 1;
912         r = request_resource(&gpmc_mem_root, res);
913         spin_unlock(&gpmc_mem_lock);
914
915         return r;
916 }
917
918 static int gpmc_cs_delete_mem(int cs)
919 {
920         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
921         struct resource *res = &gpmc->mem;
922         int r;
923
924         spin_lock(&gpmc_mem_lock);
925         r = release_resource(res);
926         res->start = 0;
927         res->end = 0;
928         spin_unlock(&gpmc_mem_lock);
929
930         return r;
931 }
932
933 /**
934  * gpmc_cs_remap - remaps a chip-select physical base address
935  * @cs:         chip-select to remap
936  * @base:       physical base address to re-map chip-select to
937  *
938  * Re-maps a chip-select to a new physical base address specified by
939  * "base". Returns 0 on success and appropriate negative error code
940  * on failure.
941  */
942 static int gpmc_cs_remap(int cs, u32 base)
943 {
944         int ret;
945         u32 old_base, size;
946
947         if (cs >= gpmc_cs_num) {
948                 pr_err("%s: requested chip-select is disabled\n", __func__);
949                 return -ENODEV;
950         }
951
952         /*
953          * Make sure we ignore any device offsets from the GPMC partition
954          * allocated for the chip select and that the new base conforms
955          * to the GPMC 16MB minimum granularity.
956          */
957         base &= ~(SZ_16M - 1);
958
959         gpmc_cs_get_memconf(cs, &old_base, &size);
960         if (base == old_base)
961                 return 0;
962
963         ret = gpmc_cs_delete_mem(cs);
964         if (ret < 0)
965                 return ret;
966
967         ret = gpmc_cs_insert_mem(cs, base, size);
968         if (ret < 0)
969                 return ret;
970
971         ret = gpmc_cs_set_memconf(cs, base, size);
972
973         return ret;
974 }
975
976 int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
977 {
978         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
979         struct resource *res = &gpmc->mem;
980         int r = -1;
981
982         if (cs >= gpmc_cs_num) {
983                 pr_err("%s: requested chip-select is disabled\n", __func__);
984                 return -ENODEV;
985         }
986         size = gpmc_mem_align(size);
987         if (size > (1 << GPMC_SECTION_SHIFT))
988                 return -ENOMEM;
989
990         spin_lock(&gpmc_mem_lock);
991         if (gpmc_cs_reserved(cs)) {
992                 r = -EBUSY;
993                 goto out;
994         }
995         if (gpmc_cs_mem_enabled(cs))
996                 r = adjust_resource(res, res->start & ~(size - 1), size);
997         if (r < 0)
998                 r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
999                                       size, NULL, NULL);
1000         if (r < 0)
1001                 goto out;
1002
1003         /* Disable CS while changing base address and size mask */
1004         gpmc_cs_disable_mem(cs);
1005
1006         r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
1007         if (r < 0) {
1008                 release_resource(res);
1009                 goto out;
1010         }
1011
1012         /* Enable CS */
1013         gpmc_cs_enable_mem(cs);
1014         *base = res->start;
1015         gpmc_cs_set_reserved(cs, 1);
1016 out:
1017         spin_unlock(&gpmc_mem_lock);
1018         return r;
1019 }
1020 EXPORT_SYMBOL(gpmc_cs_request);
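
/*
 * Illustrative usage sketch (editorial, not part of the driver): how board or
 * child-device code might claim a chip select and program its timings. The
 * timing values are made up, timings are in ns as gpmc_cs_set_timings()
 * currently expects, and error handling is abbreviated:
 *
 *	struct gpmc_timings t = {
 *		.cs_on = 0, .cs_rd_off = 90, .cs_wr_off = 90,
 *		.adv_on = 0, .oe_on = 10, .oe_off = 90,
 *		.access = 80, .rd_cycle = 100, .wr_cycle = 100,
 *	};
 *	struct gpmc_settings s = { .sync_read = false, .sync_write = false };
 *	unsigned long base;
 *
 *	if (gpmc_cs_request(cs, SZ_16M, &base))
 *		return -EBUSY;
 *	if (gpmc_cs_set_timings(cs, &t, &s))
 *		goto err_free_cs;
 */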
1021
1022 void gpmc_cs_free(int cs)
1023 {
1024         struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
1025         struct resource *res = &gpmc->mem;
1026
1027         spin_lock(&gpmc_mem_lock);
1028         if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
1029                 printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
1030                 BUG();
1031                 spin_unlock(&gpmc_mem_lock);
1032                 return;
1033         }
1034         gpmc_cs_disable_mem(cs);
1035         if (res->flags)
1036                 release_resource(res);
1037         gpmc_cs_set_reserved(cs, 0);
1038         spin_unlock(&gpmc_mem_lock);
1039 }
1040 EXPORT_SYMBOL(gpmc_cs_free);
1041
1042 /**
1043  * gpmc_configure - write request to configure gpmc
1044  * @cmd: command type
1045  * @wval: value to write
1046  * @return status of the operation
1047  */
1048 int gpmc_configure(int cmd, int wval)
1049 {
1050         u32 regval;
1051
1052         switch (cmd) {
1053         case GPMC_CONFIG_WP:
1054                 regval = gpmc_read_reg(GPMC_CONFIG);
1055                 if (wval)
1056                         regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
1057                 else
1058                         regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
1059                 gpmc_write_reg(GPMC_CONFIG, regval);
1060                 break;
1061
1062         default:
1063                 pr_err("%s: command not supported\n", __func__);
1064                 return -EINVAL;
1065         }
1066
1067         return 0;
1068 }
1069 EXPORT_SYMBOL(gpmc_configure);
1070
1071 void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
1072 {
1073         int i;
1074
1075         reg->gpmc_status = gpmc_base + GPMC_STATUS;
1076         reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
1077                                 GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
1078         reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
1079                                 GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
1080         reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
1081                                 GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
1082         reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
1083         reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
1084         reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
1085         reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
1086         reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
1087         reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
1088         reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
1089         reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
1090
1091         for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
1092                 reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
1093                                            GPMC_BCH_SIZE * i;
1094                 reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
1095                                            GPMC_BCH_SIZE * i;
1096                 reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
1097                                            GPMC_BCH_SIZE * i;
1098                 reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
1099                                            GPMC_BCH_SIZE * i;
1100                 reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
1101                                            i * GPMC_BCH_SIZE;
1102                 reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
1103                                            i * GPMC_BCH_SIZE;
1104                 reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
1105                                            i * GPMC_BCH_SIZE;
1106         }
1107 }
1108
1109 static bool gpmc_nand_writebuffer_empty(void)
1110 {
1111         if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
1112                 return true;
1113
1114         return false;
1115 }
1116
1117 static struct gpmc_nand_ops nand_ops = {
1118         .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
1119 };
1120
1121 /**
1122  * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
1123  * @regs: the GPMC NAND register map exclusive for NAND use.
1124  * @cs: GPMC chip select number on which the NAND sits. The
1125  *      register map returned will be specific to this chip select.
1126  *
1127  * Returns NULL on error e.g. invalid cs.
1128  */
1129 struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
1130 {
1131         if (cs >= gpmc_cs_num)
1132                 return NULL;
1133
1134         gpmc_update_nand_reg(reg, cs);
1135
1136         return &nand_ops;
1137 }
1138 EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
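
/*
 * Illustrative call (editorial, not part of the driver): a NAND driver is
 * expected to fetch its per-chip-select register map plus the helper ops
 * roughly as follows, and may then poll ops->nand_writebuffer_empty() before
 * issuing writes:
 *
 *	struct gpmc_nand_regs regs;
 *	struct gpmc_nand_ops *ops = gpmc_omap_get_nand_ops(&regs, cs);
 *
 *	if (!ops)
 *		return -ENODEV;
 */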
1139
1140 int gpmc_get_client_irq(unsigned irq_config)
1141 {
1142         if (!gpmc_irq_domain) {
1143                 pr_warn("%s called before GPMC IRQ domain available\n",
1144                         __func__);
1145                 return 0;
1146         }
1147
1148         if (irq_config >= GPMC_NR_IRQ)
1149                 return 0;
1150
1151         return irq_create_mapping(gpmc_irq_domain, irq_config);
1152 }
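
/*
 * Example (editorial): a client wanting the first of the GPMC_NR_IRQ events
 * would do
 *
 *	int virq = gpmc_get_client_irq(0);
 *
 * and then request_irq() on the returned number; 0 means the mapping is not
 * available (yet) or the index is out of range.
 */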
1153
1154 static int gpmc_irq_endis(unsigned long hwirq, bool endis)
1155 {
1156         u32 regval;
1157
1158         regval = gpmc_read_reg(GPMC_IRQENABLE);
1159         if (endis)
1160                 regval |= BIT(hwirq);
1161         else
1162                 regval &= ~BIT(hwirq);
1163         gpmc_write_reg(GPMC_IRQENABLE, regval);
1164
1165         return 0;
1166 }
1167
1168 static void gpmc_irq_disable(struct irq_data *p)
1169 {
1170         gpmc_irq_endis(p->hwirq, false);
1171 }
1172
1173 static void gpmc_irq_enable(struct irq_data *p)
1174 {
1175         gpmc_irq_endis(p->hwirq, true);
1176 }
1177
1178 static void gpmc_irq_noop(struct irq_data *data) { }
1179
1180 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
1181
1182 static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
1183                         irq_hw_number_t hw)
1184 {
1185         struct gpmc_device *gpmc = d->host_data;
1186
1187         irq_set_chip_data(virq, gpmc);
1188         irq_set_chip_and_handler(virq, &gpmc->irq_chip, handle_simple_irq);
1189         irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
1190
1191         return 0;
1192 }
1193
1194 static const struct irq_domain_ops gpmc_irq_domain_ops = {
1195         .map    = gpmc_irq_map,
1196         .xlate  = irq_domain_xlate_twocell,
1197 };
1198
1199 static irqreturn_t gpmc_handle_irq(int irq, void *data)
1200 {
1201         int hwirq, virq;
1202         u32 regval;
1203         struct gpmc_device *gpmc = data;
1204
1205         regval = gpmc_read_reg(GPMC_IRQSTATUS);
1206
1207         if (!regval)
1208                 return IRQ_NONE;
1209
1210         for (hwirq = 0; hwirq < GPMC_NR_IRQ; hwirq++) {
1211                 if (regval & BIT(hwirq)) {
1212                         virq = irq_find_mapping(gpmc_irq_domain, hwirq);
1213                         if (!virq) {
1214                                 dev_warn(gpmc->dev,
1215                                          "spurious irq detected hwirq %d, virq %d\n",
1216                                          hwirq, virq);
1217                         }
1218
1219                         generic_handle_irq(virq);
1220                 }
1221         }
1222
1223         gpmc_write_reg(GPMC_IRQSTATUS, regval);
1224
1225         return IRQ_HANDLED;
1226 }
1227
1228 static int gpmc_setup_irq(struct gpmc_device *gpmc)
1229 {
1230         u32 regval;
1231         int rc;
1232
1233         /* Disable interrupts */
1234         gpmc_write_reg(GPMC_IRQENABLE, 0);
1235
1236         /* clear interrupts */
1237         regval = gpmc_read_reg(GPMC_IRQSTATUS);
1238         gpmc_write_reg(GPMC_IRQSTATUS, regval);
1239
1240         gpmc->irq_chip.name = "gpmc";
1241         gpmc->irq_chip.irq_startup = gpmc_irq_noop_ret;
1242         gpmc->irq_chip.irq_enable = gpmc_irq_enable;
1243         gpmc->irq_chip.irq_disable = gpmc_irq_disable;
1244         gpmc->irq_chip.irq_shutdown = gpmc_irq_noop;
1245         gpmc->irq_chip.irq_ack = gpmc_irq_noop;
1246         gpmc->irq_chip.irq_mask = gpmc_irq_noop;
1247         gpmc->irq_chip.irq_unmask = gpmc_irq_noop;
1248
1249         gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
1250                                                 GPMC_NR_IRQ,
1251                                                 &gpmc_irq_domain_ops,
1252                                                 gpmc);
1253         if (!gpmc_irq_domain) {
1254                 dev_err(gpmc->dev, "IRQ domain add failed\n");
1255                 return -ENODEV;
1256         }
1257
1258         rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
1259         if (rc) {
1260                 dev_err(gpmc->dev, "failed to request irq %d: %d\n",
1261                         gpmc->irq, rc);
1262                 irq_domain_remove(gpmc_irq_domain);
1263                 gpmc_irq_domain = NULL;
1264         }
1265
1266         return rc;
1267 }
1268
1269 static int gpmc_free_irq(struct gpmc_device *gpmc)
1270 {
1271         int hwirq;
1272
1273         free_irq(gpmc->irq, gpmc);
1274
1275         for (hwirq = 0; hwirq < GPMC_NR_IRQ; hwirq++)
1276                 irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
1277
1278         irq_domain_remove(gpmc_irq_domain);
1279         gpmc_irq_domain = NULL;
1280
1281         return 0;
1282 }
1283
1284 static void gpmc_mem_exit(void)
1285 {
1286         int cs;
1287
1288         for (cs = 0; cs < gpmc_cs_num; cs++) {
1289                 if (!gpmc_cs_mem_enabled(cs))
1290                         continue;
1291                 gpmc_cs_delete_mem(cs);
1292         }
1293
1294 }
1295
1296 static void gpmc_mem_init(void)
1297 {
1298         int cs;
1299
1300         /*
1301          * The first 1MB of GPMC address space is typically mapped to
1302          * the internal ROM. Never allocate the first page, to
1303          * facilitate bug detection, even if we didn't boot from ROM.
1304          */
1305         gpmc_mem_root.start = SZ_1M;
1306         gpmc_mem_root.end = GPMC_MEM_END;
1307
1308         /* Reserve all regions that have been set up by the bootloader */
1309         for (cs = 0; cs < gpmc_cs_num; cs++) {
1310                 u32 base, size;
1311
1312                 if (!gpmc_cs_mem_enabled(cs))
1313                         continue;
1314                 gpmc_cs_get_memconf(cs, &base, &size);
1315                 if (gpmc_cs_insert_mem(cs, base, size)) {
1316                         pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
1317                                 __func__, cs, base, base + size);
1318                         gpmc_cs_disable_mem(cs);
1319                 }
1320         }
1321 }
1322
1323 static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
1324 {
1325         u32 temp;
1326         int div;
1327
1328         div = gpmc_calc_divider(sync_clk);
1329         temp = gpmc_ps_to_ticks(time_ps);
1330         temp = (temp + div - 1) / div;
1331         return gpmc_ticks_to_ps(temp * div);
1332 }
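
/*
 * Worked example (editorial, assuming a 10 ns FCLK and sync_clk = 20000 ps):
 * div = 2, so time_ps = 35000 becomes 4 FCLK ticks, (4 + 2 - 1) / 2 = 2
 * GPMC_CLK cycles, and the function returns 2 * 2 * 10000 = 40000 ps, i.e.
 * times are rounded up to a whole number of GPMC_CLK cycles.
 */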
1333
1334 /* XXX: can the cycles be avoided ? */
1335 static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
1336                                        struct gpmc_device_timings *dev_t,
1337                                        bool mux)
1338 {
1339         u32 temp;
1340
1341         /* adv_rd_off */
1342         temp = dev_t->t_avdp_r;
1343         /* XXX: mux check required ? */
1344         if (mux) {
1345                 /* XXX: t_avdp not to be required for sync, only added for tusb
1346                  * this indirectly necessitates requirement of t_avdp_r and
1347                  * t_avdp_w instead of having a single t_avdp
1348                  */
1349                 temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
1350                 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1351         }
1352         gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
1353
1354         /* oe_on */
1355         temp = dev_t->t_oeasu; /* XXX: remove this ? */
1356         if (mux) {
1357                 temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
1358                 temp = max_t(u32, temp, gpmc_t->adv_rd_off +
1359                                 gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
1360         }
1361         gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
1362
1363         /* access */
1364         /* XXX: any scope for improvement ?, by combining oe_on
1365          * and clk_activation, need to check whether
1366          * access = clk_activation + round to sync clk ?
1367          */
1368         temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
1369         temp += gpmc_t->clk_activation;
1370         if (dev_t->cyc_oe)
1371                 temp = max_t(u32, temp, gpmc_t->oe_on +
1372                                 gpmc_ticks_to_ps(dev_t->cyc_oe));
1373         gpmc_t->access = gpmc_round_ps_to_ticks(temp);
1374
1375         gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
1376         gpmc_t->cs_rd_off = gpmc_t->oe_off;
1377
1378         /* rd_cycle */
1379         temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
1380         temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
1381                                                         gpmc_t->access;
1382         /* XXX: barter t_ce_rdyz with t_cez_r ? */
1383         if (dev_t->t_ce_rdyz)
1384                 temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
1385         gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
1386
1387         return 0;
1388 }
1389
1390 static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
1391                                         struct gpmc_device_timings *dev_t,
1392                                         bool mux)
1393 {
1394         u32 temp;
1395
1396         /* adv_wr_off */
1397         temp = dev_t->t_avdp_w;
1398         if (mux) {
1399                 temp = max_t(u32, temp,
1400                         gpmc_t->clk_activation + dev_t->t_avdh);
1401                 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1402         }
1403         gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
1404
1405         /* wr_data_mux_bus */
1406         temp = max_t(u32, dev_t->t_weasu,
1407                         gpmc_t->clk_activation + dev_t->t_rdyo);
1408         /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
1409          * and in that case remember to handle we_on properly
1410          */
1411         if (mux) {
1412                 temp = max_t(u32, temp,
1413                         gpmc_t->adv_wr_off + dev_t->t_aavdh);
1414                 temp = max_t(u32, temp, gpmc_t->adv_wr_off +
1415                                 gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
1416         }
1417         gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
1418
1419         /* we_on */
1420         if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
1421                 gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
1422         else
1423                 gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
1424
1425         /* wr_access */
1426         /* XXX: gpmc_capability check reqd ? , even if not, will not harm */
1427         gpmc_t->wr_access = gpmc_t->access;
1428
1429         /* we_off */
1430         temp = gpmc_t->we_on + dev_t->t_wpl;
1431         temp = max_t(u32, temp,
1432                         gpmc_t->wr_access + gpmc_ticks_to_ps(1));
1433         temp = max_t(u32, temp,
1434                 gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
1435         gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
1436
1437         gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
1438                                                         dev_t->t_wph);
1439
1440         /* wr_cycle */
1441         temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
1442         temp += gpmc_t->wr_access;
1443         /* XXX: barter t_ce_rdyz with t_cez_w ? */
1444         if (dev_t->t_ce_rdyz)
1445                 temp = max_t(u32, temp,
1446                                  gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
1447         gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
1448
1449         return 0;
1450 }
1451
1452 static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
1453                                         struct gpmc_device_timings *dev_t,
1454                                         bool mux)
1455 {
1456         u32 temp;
1457
1458         /* adv_rd_off */
1459         temp = dev_t->t_avdp_r;
1460         if (mux)
1461                 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1462         gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
1463
1464         /* oe_on */
1465         temp = dev_t->t_oeasu;
1466         if (mux)
1467                 temp = max_t(u32, temp,
1468                         gpmc_t->adv_rd_off + dev_t->t_aavdh);
1469         gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
1470
1471         /* access */
1472         temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
1473                                 gpmc_t->oe_on + dev_t->t_oe);
1474         temp = max_t(u32, temp,
1475                                 gpmc_t->cs_on + dev_t->t_ce);
1476         temp = max_t(u32, temp,
1477                                 gpmc_t->adv_on + dev_t->t_aa);
1478         gpmc_t->access = gpmc_round_ps_to_ticks(temp);
1479
1480         gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
1481         gpmc_t->cs_rd_off = gpmc_t->oe_off;
1482
1483         /* rd_cycle */
1484         temp = max_t(u32, dev_t->t_rd_cycle,
1485                         gpmc_t->cs_rd_off + dev_t->t_cez_r);
1486         temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
1487         gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
1488
1489         return 0;
1490 }
1491
1492 static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
1493                                          struct gpmc_device_timings *dev_t,
1494                                          bool mux)
1495 {
1496         u32 temp;
1497
1498         /* adv_wr_off */
1499         temp = dev_t->t_avdp_w;
1500         if (mux)
1501                 temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
1502         gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
1503
1504         /* wr_data_mux_bus */
1505         temp = dev_t->t_weasu;
1506         if (mux) {
1507                 temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
1508                 temp = max_t(u32, temp, gpmc_t->adv_wr_off +
1509                                 gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
1510         }
1511         gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
1512
1513         /* we_on */
1514         if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
1515                 gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
1516         else
1517                 gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
1518
1519         /* we_off */
1520         temp = gpmc_t->we_on + dev_t->t_wpl;
1521         gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
1522
1523         gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
1524                                                         dev_t->t_wph);
1525
1526         /* wr_cycle */
1527         temp = max_t(u32, dev_t->t_wr_cycle,
1528                                 gpmc_t->cs_wr_off + dev_t->t_cez_w);
1529         gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
1530
1531         return 0;
1532 }
1533
1534 static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
1535                         struct gpmc_device_timings *dev_t)
1536 {
1537         u32 temp;
1538
1539         gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
1540                                                 gpmc_get_fclk_period();
1541
1542         gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
1543                                         dev_t->t_bacc,
1544                                         gpmc_t->sync_clk);
1545
1546         temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
1547         gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
1548
1549         if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
1550                 return 0;
1551
1552         if (dev_t->ce_xdelay)
1553                 gpmc_t->bool_timings.cs_extra_delay = true;
1554         if (dev_t->avd_xdelay)
1555                 gpmc_t->bool_timings.adv_extra_delay = true;
1556         if (dev_t->oe_xdelay)
1557                 gpmc_t->bool_timings.oe_extra_delay = true;
1558         if (dev_t->we_xdelay)
1559                 gpmc_t->bool_timings.we_extra_delay = true;
1560
1561         return 0;
1562 }
1563
1564 static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
1565                                     struct gpmc_device_timings *dev_t,
1566                                     bool sync)
1567 {
1568         u32 temp;
1569
1570         /* cs_on */
1571         gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
1572
1573         /* adv_on */
1574         temp = dev_t->t_avdasu;
1575         if (dev_t->t_ce_avd)
1576                 temp = max_t(u32, temp,
1577                                 gpmc_t->cs_on + dev_t->t_ce_avd);
1578         gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
1579
1580         if (sync)
1581                 gpmc_calc_sync_common_timings(gpmc_t, dev_t);
1582
1583         return 0;
1584 }
1585
1586 /* TODO: remove this function once all peripherals are confirmed to
1587  * work with generic timing. At the same time, gpmc_cs_set_timings()
1588  * has to be modified to handle timings in ps instead of ns.
1589  */
1590 static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
1591 {
1592         t->cs_on /= 1000;
1593         t->cs_rd_off /= 1000;
1594         t->cs_wr_off /= 1000;
1595         t->adv_on /= 1000;
1596         t->adv_rd_off /= 1000;
1597         t->adv_wr_off /= 1000;
1598         t->we_on /= 1000;
1599         t->we_off /= 1000;
1600         t->oe_on /= 1000;
1601         t->oe_off /= 1000;
1602         t->page_burst_access /= 1000;
1603         t->access /= 1000;
1604         t->rd_cycle /= 1000;
1605         t->wr_cycle /= 1000;
1606         t->bus_turnaround /= 1000;
1607         t->cycle2cycle_delay /= 1000;
1608         t->wait_monitoring /= 1000;
1609         t->clk_activation /= 1000;
1610         t->wr_access /= 1000;
1611         t->wr_data_mux_bus /= 1000;
1612 }
1613
1614 int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
1615                       struct gpmc_settings *gpmc_s,
1616                       struct gpmc_device_timings *dev_t)
1617 {
1618         bool mux = false, sync = false;
1619
1620         if (gpmc_s) {
1621                 mux = gpmc_s->mux_add_data ? true : false;
1622                 sync = (gpmc_s->sync_read || gpmc_s->sync_write);
1623         }
1624
1625         memset(gpmc_t, 0, sizeof(*gpmc_t));
1626
1627         gpmc_calc_common_timings(gpmc_t, dev_t, sync);
1628
1629         if (gpmc_s && gpmc_s->sync_read)
1630                 gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
1631         else
1632                 gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);
1633
1634         if (gpmc_s && gpmc_s->sync_write)
1635                 gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
1636         else
1637                 gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);
1638
1639         /* TODO: remove, see function definition */
1640         gpmc_convert_ps_to_ns(gpmc_t);
1641
1642         return 0;
1643 }
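/*
 * Illustrative usage sketch (not part of the driver): how a caller might go
 * from raw device timings to programmed chip-select timings. All field values
 * below are placeholders, not real device numbers; dev_t fields are expressed
 * in picoseconds, as in the calculations above, and the chip-select number is
 * hypothetical.
 *
 *	struct gpmc_device_timings dev_t = { };
 *	struct gpmc_settings gpmc_s = { };
 *	struct gpmc_timings gpmc_t;
 *	int cs = 1;				// hypothetical chip-select
 *
 *	gpmc_s.mux_add_data = GPMC_MUX_AD;	// multiplexed address/data bus
 *	dev_t.t_ceasu = 10 * 1000;		// CS setup time, in ps
 *	dev_t.t_oe = 50 * 1000;			// OE access time, in ps
 *
 *	gpmc_calc_timings(&gpmc_t, &gpmc_s, &dev_t);	// fills gpmc_t (converted to ns)
 *	gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);	// programs the CS timing registers
 */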
1644
1645 /**
1646  * gpmc_cs_program_settings - programs non-timing related settings
1647  * @cs:         GPMC chip-select to program
1648  * @p:          pointer to GPMC settings structure
1649  *
1650  * Programs non-timing related settings for a GPMC chip-select, such as
1651  * bus-width, burst configuration, etc. Function should be called once
1652  * for each chip-select that is being used and must be called before
1653  * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
1654  * register will be initialised to zero by this function. Returns 0 on
1655  * success and appropriate negative error code on failure.
1656  */
1657 int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
1658 {
1659         u32 config1;
1660
1661         if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
1662                 pr_err("%s: invalid width %d!\n", __func__, p->device_width);
1663                 return -EINVAL;
1664         }
1665
1666         /* Address-data multiplexing not supported for NAND devices */
1667         if (p->device_nand && p->mux_add_data) {
1668                 pr_err("%s: invalid configuration!\n", __func__);
1669                 return -EINVAL;
1670         }
1671
1672         if ((p->mux_add_data > GPMC_MUX_AD) ||
1673             ((p->mux_add_data == GPMC_MUX_AAD) &&
1674              !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
1675                 pr_err("%s: invalid multiplex configuration!\n", __func__);
1676                 return -EINVAL;
1677         }
1678
1679         /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
1680         if (p->burst_read || p->burst_write) {
1681                 switch (p->burst_len) {
1682                 case GPMC_BURST_4:
1683                 case GPMC_BURST_8:
1684                 case GPMC_BURST_16:
1685                         break;
1686                 default:
1687                         pr_err("%s: invalid page/burst-length (%d)\n",
1688                                __func__, p->burst_len);
1689                         return -EINVAL;
1690                 }
1691         }
1692
1693         if (p->wait_pin > gpmc_nr_waitpins) {
1694                 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
1695                 return -EINVAL;
1696         }
1697
1698         config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
1699
1700         if (p->sync_read)
1701                 config1 |= GPMC_CONFIG1_READTYPE_SYNC;
1702         if (p->sync_write)
1703                 config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
1704         if (p->wait_on_read)
1705                 config1 |= GPMC_CONFIG1_WAIT_READ_MON;
1706         if (p->wait_on_write)
1707                 config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
1708         if (p->wait_on_read || p->wait_on_write)
1709                 config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
1710         if (p->device_nand)
1711                 config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
1712         if (p->mux_add_data)
1713                 config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
1714         if (p->burst_read)
1715                 config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
1716         if (p->burst_write)
1717                 config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
1718         if (p->burst_read || p->burst_write) {
1719                 config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
1720                 config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
1721         }
1722
1723         gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
1724
1725         return 0;
1726 }
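/*
 * Typical call order, mirroring gpmc_probe_generic_child() below (sketch only;
 * the chip-select number and region size are placeholders):
 *
 *	unsigned long base;
 *	int cs = 0;
 *
 *	gpmc_cs_request(cs, SZ_16M, &base);		// reserve the CS region
 *	gpmc_cs_program_settings(cs, &gpmc_s);		// non-timing CONFIG1 fields
 *	gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);	// timing registers
 *	gpmc_cs_enable_mem(cs);				// enable the mapping
 */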
1727
1728 #ifdef CONFIG_OF
1729 static const struct of_device_id gpmc_dt_ids[] = {
1730         { .compatible = "ti,omap2420-gpmc" },
1731         { .compatible = "ti,omap2430-gpmc" },
1732         { .compatible = "ti,omap3430-gpmc" },   /* omap3430 & omap3630 */
1733         { .compatible = "ti,omap4430-gpmc" },   /* omap4430 & omap4460 & omap543x */
1734         { .compatible = "ti,am3352-gpmc" },     /* am335x devices */
1735         { }
1736 };
1737 MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
1738
1739 /**
1740  * gpmc_read_settings_dt - read gpmc settings from device-tree
1741  * @np:         pointer to device-tree node for a gpmc child device
1742  * @p:          pointer to gpmc settings structure
1743  *
1744  * Reads the GPMC settings for a GPMC child device from device-tree and
1745  * stores them in the GPMC settings structure passed. The GPMC settings
1746  * structure is initialised to zero by this function and so any
1747  * previously stored settings will be cleared.
1748  */
1749 void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
1750 {
1751         memset(p, 0, sizeof(struct gpmc_settings));
1752
1753         p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
1754         p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
1755         of_property_read_u32(np, "gpmc,device-width", &p->device_width);
1756         of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
1757
1758         if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
1759                 p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
1760                 p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
1761                 p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
1762                 if (!p->burst_read && !p->burst_write)
1763                         pr_warn("%s: page/burst-length set but not used!\n",
1764                                 __func__);
1765         }
1766
1767         if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
1768                 p->wait_on_read = of_property_read_bool(np,
1769                                                         "gpmc,wait-on-read");
1770                 p->wait_on_write = of_property_read_bool(np,
1771                                                          "gpmc,wait-on-write");
1772                 if (!p->wait_on_read && !p->wait_on_write)
1773                         pr_debug("%s: rd/wr wait monitoring not enabled!\n",
1774                                  __func__);
1775         }
1776 }
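/*
 * Example of the child-node properties consumed above (illustrative only; the
 * node name, unit address and values are placeholders, and the numeric values
 * are assumed to correspond to GPMC_DEVWIDTH_16BIT and GPMC_MUX_AD):
 *
 *	nor@0,0 {
 *		gpmc,device-width = <2>;
 *		gpmc,mux-add-data = <2>;
 *		gpmc,sync-read;
 *		gpmc,burst-length = <16>;
 *		gpmc,burst-read;
 *		gpmc,burst-wrap;
 *		gpmc,wait-pin = <0>;
 *		gpmc,wait-on-read;
 *	};
 */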
1777
1778 static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1779                                                 struct gpmc_timings *gpmc_t)
1780 {
1781         struct gpmc_bool_timings *p;
1782
1783         if (!np || !gpmc_t)
1784                 return;
1785
1786         memset(gpmc_t, 0, sizeof(*gpmc_t));
1787
1788         /* minimum clock period for synchronous mode */
1789         of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);
1790
1791         /* chip select timings */
1792         of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
1793         of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
1794         of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);
1795
1796         /* ADV signal timings */
1797         of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
1798         of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
1799         of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
1800         of_property_read_u32(np, "gpmc,adv-aad-mux-on-ns",
1801                              &gpmc_t->adv_aad_mux_on);
1802         of_property_read_u32(np, "gpmc,adv-aad-mux-rd-off-ns",
1803                              &gpmc_t->adv_aad_mux_rd_off);
1804         of_property_read_u32(np, "gpmc,adv-aad-mux-wr-off-ns",
1805                              &gpmc_t->adv_aad_mux_wr_off);
1806
1807         /* WE signal timings */
1808         of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
1809         of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);
1810
1811         /* OE signal timings */
1812         of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
1813         of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
1814         of_property_read_u32(np, "gpmc,oe-aad-mux-on-ns",
1815                              &gpmc_t->oe_aad_mux_on);
1816         of_property_read_u32(np, "gpmc,oe-aad-mux-off-ns",
1817                              &gpmc_t->oe_aad_mux_off);
1818
1819         /* access and cycle timings */
1820         of_property_read_u32(np, "gpmc,page-burst-access-ns",
1821                              &gpmc_t->page_burst_access);
1822         of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
1823         of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
1824         of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
1825         of_property_read_u32(np, "gpmc,bus-turnaround-ns",
1826                              &gpmc_t->bus_turnaround);
1827         of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
1828                              &gpmc_t->cycle2cycle_delay);
1829         of_property_read_u32(np, "gpmc,wait-monitoring-ns",
1830                              &gpmc_t->wait_monitoring);
1831         of_property_read_u32(np, "gpmc,clk-activation-ns",
1832                              &gpmc_t->clk_activation);
1833
1834         /* only applicable to OMAP3+ */
1835         of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
1836         of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
1837                              &gpmc_t->wr_data_mux_bus);
1838
1839         /* bool timing parameters */
1840         p = &gpmc_t->bool_timings;
1841
1842         p->cycle2cyclediffcsen =
1843                 of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
1844         p->cycle2cyclesamecsen =
1845                 of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
1846         p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
1847         p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
1848         p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
1849         p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
1850         p->time_para_granularity =
1851                 of_property_read_bool(np, "gpmc,time-para-granularity");
1852 }
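/*
 * Example of the timing properties consumed above (illustrative only; the
 * numbers are placeholders, not tuned values). The *-ns properties are in
 * nanoseconds and gpmc,sync-clk-ps is in picoseconds:
 *
 *	gpmc,sync-clk-ps = <20000>;
 *	gpmc,cs-on-ns = <0>;
 *	gpmc,cs-rd-off-ns = <80>;
 *	gpmc,cs-wr-off-ns = <80>;
 *	gpmc,access-ns = <60>;
 *	gpmc,rd-cycle-ns = <100>;
 *	gpmc,wr-cycle-ns = <100>;
 *	gpmc,cs-extra-delay;
 */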
1853
1854 #if IS_ENABLED(CONFIG_MTD_ONENAND)
1855 static int gpmc_probe_onenand_child(struct platform_device *pdev,
1856                                  struct device_node *child)
1857 {
1858         u32 val;
1859         struct omap_onenand_platform_data *gpmc_onenand_data;
1860
1861         if (of_property_read_u32(child, "reg", &val) < 0) {
1862                 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1863                         child->full_name);
1864                 return -ENODEV;
1865         }
1866
1867         gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
1868                                          GFP_KERNEL);
1869         if (!gpmc_onenand_data)
1870                 return -ENOMEM;
1871
1872         gpmc_onenand_data->cs = val;
1873         gpmc_onenand_data->of_node = child;
1874         gpmc_onenand_data->dma_channel = -1;
1875
1876         if (!of_property_read_u32(child, "dma-channel", &val))
1877                 gpmc_onenand_data->dma_channel = val;
1878
1879         gpmc_onenand_init(gpmc_onenand_data);
1880
1881         return 0;
1882 }
1883 #else
1884 static int gpmc_probe_onenand_child(struct platform_device *pdev,
1885                                     struct device_node *child)
1886 {
1887         return 0;
1888 }
1889 #endif
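/*
 * Illustrative OneNAND child node for the probe path above (placeholder
 * values; only the first 'reg' cell, taken as the chip-select, and the
 * optional 'dma-channel' property are read here):
 *
 *	onenand@0,0 {
 *		reg = <0 0 0x20000>;
 *		dma-channel = <0>;
 *	};
 */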
1890
1891 /**
1892  * gpmc_probe_generic_child - configures the gpmc for a child device
1893  * @pdev:       pointer to gpmc platform device
1894  * @child:      pointer to device-tree node for child device
1895  *
1896  * Allocates and configures a GPMC chip-select for a child device.
1897  * Returns 0 on success and appropriate negative error code on failure.
1898  */
1899 static int gpmc_probe_generic_child(struct platform_device *pdev,
1900                                 struct device_node *child)
1901 {
1902         struct gpmc_settings gpmc_s;
1903         struct gpmc_timings gpmc_t;
1904         struct resource res;
1905         unsigned long base;
1906         const char *name;
1907         int ret, cs;
1908         u32 val;
1909
1910         if (of_property_read_u32(child, "reg", &cs) < 0) {
1911                 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1912                         child->full_name);
1913                 return -ENODEV;
1914         }
1915
1916         if (of_address_to_resource(child, 0, &res) < 0) {
1917                 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1918                         child->full_name);
1919                 return -ENODEV;
1920         }
1921
1922         /*
1923          * Check if we have multiple instances of the same device
1924          * on a single chip select. If so, use the already initialized
1925          * timings.
1926          */
1927         name = gpmc_cs_get_name(cs);
1928         if (name && child->name && of_node_cmp(child->name, name) == 0)
1929                 goto no_timings;
1930
1931         ret = gpmc_cs_request(cs, resource_size(&res), &base);
1932         if (ret < 0) {
1933                 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1934                 return ret;
1935         }
1936         gpmc_cs_set_name(cs, child->name);
1937
1938         gpmc_read_settings_dt(child, &gpmc_s);
1939         gpmc_read_timings_dt(child, &gpmc_t);
1940
1941         /*
1942          * For some GPMC devices we still need to rely on the bootloader
1943          * timings because the devices can be connected via FPGA.
1944          * REVISIT: Add timing support from slls644g.pdf.
1945          */
1946         if (!gpmc_t.cs_rd_off) {
1947                 WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
1948                         cs);
1949                 gpmc_cs_show_timings(cs,
1950                                      "please add GPMC bootloader timings to .dts");
1951                 goto no_timings;
1952         }
1953
1954         /* CS must be disabled while making changes to gpmc configuration */
1955         gpmc_cs_disable_mem(cs);
1956
1957         /*
1958          * FIXME: gpmc_cs_request() will map the CS to an arbitrary
1959          * location in the gpmc address space. When booting with
1960          * device-tree we want the NOR flash to be mapped to the
1961          * location specified in the device-tree blob. So remap the
1962          * CS to this location. Once DT migration is complete should
1963          * just make gpmc_cs_request() map a specific address.
1964          */
1965         ret = gpmc_cs_remap(cs, res.start);
1966         if (ret < 0) {
1967                 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
1968                         cs, &res.start);
1969                 goto err;
1970         }
1971
1972         if (of_node_cmp(child->name, "nand") == 0) {
1973                 /* Warn about older DT blobs with no compatible property */
1974                 if (!of_property_read_bool(child, "compatible")) {
1975                         dev_warn(&pdev->dev,
1976                                  "Incompatible NAND node: missing compatible");
1977                         ret = -EINVAL;
1978                         goto err;
1979                 }
1980         }
1981
1982         if (of_device_is_compatible(child, "ti,omap2-nand")) {
1983                 /* NAND specific setup */
1984                 val = of_get_nand_bus_width(child);
1985                 switch (val) {
1986                 case 8:
1987                         gpmc_s.device_width = GPMC_DEVWIDTH_8BIT;
1988                         break;
1989                 case 16:
1990                         gpmc_s.device_width = GPMC_DEVWIDTH_16BIT;
1991                         break;
1992                 default:
1993                         dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n",
1994                                 child->name);
1995                         ret = -EINVAL;
1996                         goto err;
1997                 }
1998
1999                 /* disable write protect */
2000                 gpmc_configure(GPMC_CONFIG_WP, 0);
2001                 gpmc_s.device_nand = true;
2002         } else {
2003                 ret = of_property_read_u32(child, "bank-width",
2004                                            &gpmc_s.device_width);
2005                 if (ret < 0)
2006                         goto err;
2007         }
2008
2009         gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
2010         ret = gpmc_cs_program_settings(cs, &gpmc_s);
2011         if (ret < 0)
2012                 goto err;
2013
2014         ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
2015         if (ret) {
2016                 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
2017                         child->name);
2018                 goto err;
2019         }
2020
2021         /* Clear limited address i.e. enable A26-A11 */
2022         val = gpmc_read_reg(GPMC_CONFIG);
2023         val &= ~GPMC_CONFIG_LIMITEDADDRESS;
2024         gpmc_write_reg(GPMC_CONFIG, val);
2025
2026         /* Enable CS region */
2027         gpmc_cs_enable_mem(cs);
2028
2029 no_timings:
2030
2031         /* create platform device, NULL on error or when disabled */
2032         if (!of_platform_device_create(child, NULL, &pdev->dev))
2033                 goto err_child_fail;
2034
2035         /* is child a common bus? */
2036         if (of_match_node(of_default_bus_match_table, child))
2037                 /* create children and other common bus children */
2038                 if (of_platform_populate(child, of_default_bus_match_table,
2039                                          NULL, &pdev->dev))
2040                         goto err_child_fail;
2041
2042         return 0;
2043
2044 err_child_fail:
2045
2046         dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
2047         ret = -ENODEV;
2048
2049 err:
2050         gpmc_cs_free(cs);
2051
2052         return ret;
2053 }
2054
2055 static int gpmc_probe_dt(struct platform_device *pdev)
2056 {
2057         int ret;
2058         struct device_node *child;
2059         const struct of_device_id *of_id =
2060                 of_match_device(gpmc_dt_ids, &pdev->dev);
2061
2062         if (!of_id)
2063                 return 0;
2064
2065         ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
2066                                    &gpmc_cs_num);
2067         if (ret < 0) {
2068                 pr_err("%s: number of chip-selects not defined\n", __func__);
2069                 return ret;
2070         } else if (gpmc_cs_num < 1) {
2071                 pr_err("%s: all chip-selects are disabled\n", __func__);
2072                 return -EINVAL;
2073         } else if (gpmc_cs_num > GPMC_CS_NUM) {
2074                 pr_err("%s: number of supported chip-selects cannot be > %d\n",
2075                                          __func__, GPMC_CS_NUM);
2076                 return -EINVAL;
2077         }
2078
2079         ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
2080                                    &gpmc_nr_waitpins);
2081         if (ret < 0) {
2082                 pr_err("%s: number of wait pins not found!\n", __func__);
2083                 return ret;
2084         }
2085
2086         for_each_available_child_of_node(pdev->dev.of_node, child) {
2087
2088                 if (!child->name)
2089                         continue;
2090
2091                 if (of_node_cmp(child->name, "onenand") == 0)
2092                         ret = gpmc_probe_onenand_child(pdev, child);
2093                 else
2094                         ret = gpmc_probe_generic_child(pdev, child);
2095         }
2096
2097         return 0;
2098 }
2099 #else
2100 static int gpmc_probe_dt(struct platform_device *pdev)
2101 {
2102         return 0;
2103 }
2104 #endif
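/*
 * Illustrative controller node for gpmc_probe_dt() (placeholder unit address
 * and counts; the compatible string is one of gpmc_dt_ids above). GPMC child
 * nodes (nand, onenand, NOR, ...) then hang off this node and are handled by
 * gpmc_probe_generic_child() or gpmc_probe_onenand_child():
 *
 *	gpmc: gpmc@6e000000 {
 *		compatible = "ti,omap3430-gpmc";
 *		gpmc,num-cs = <8>;
 *		gpmc,num-waitpins = <4>;
 *	};
 */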
2105
2106 static int gpmc_probe(struct platform_device *pdev)
2107 {
2108         int rc;
2109         u32 l;
2110         struct resource *res;
2111         struct gpmc_device *gpmc;
2112
2113         gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL);
2114         if (!gpmc)
2115                 return -ENOMEM;
2116
2117         gpmc->dev = &pdev->dev;
2118         platform_set_drvdata(pdev, gpmc);
2119
2120         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2121         if (res == NULL)
2122                 return -ENOENT;
2123
2124         phys_base = res->start;
2125         mem_size = resource_size(res);
2126
2127         gpmc_base = devm_ioremap_resource(&pdev->dev, res);
2128         if (IS_ERR(gpmc_base))
2129                 return PTR_ERR(gpmc_base);
2130
2131         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2132         if (!res) {
2133                 dev_err(&pdev->dev, "Failed to get resource: irq\n");
2134                 return -ENOENT;
2135         }
2136
2137         gpmc->irq = res->start;
2138
2139         gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
2140         if (IS_ERR(gpmc_l3_clk)) {
2141                 dev_err(&pdev->dev, "Failed to get GPMC fck\n");
2142                 return PTR_ERR(gpmc_l3_clk);
2143         }
2144
2145         if (!clk_get_rate(gpmc_l3_clk)) {
2146                 dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
2147                 return -EINVAL;
2148         }
2149
2150         pm_runtime_enable(&pdev->dev);
2151         pm_runtime_get_sync(&pdev->dev);
2152
2153         l = gpmc_read_reg(GPMC_REVISION);
2154
2155         /*
2156          * FIXME: Once device-tree migration is complete the below flags
2157          * should be populated based upon the device-tree compatible
2158          * string. For now just use the IP revision. OMAP3+ devices have
2159          * the wr_access and wr_data_mux_bus register fields. OMAP4+
2160          * devices support the addr-addr-data multiplex protocol.
2161          *
2162          * GPMC IP revisions:
2163          * - OMAP24xx                   = 2.0
2164          * - OMAP3xxx                   = 5.0
2165          * - OMAP44xx/54xx/AM335x       = 6.0
2166          */
2167         if (GPMC_REVISION_MAJOR(l) > 0x4)
2168                 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
2169         if (GPMC_REVISION_MAJOR(l) > 0x5)
2170                 gpmc_capability |= GPMC_HAS_MUX_AAD;
2171         dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
2172                  GPMC_REVISION_MINOR(l));
2173
2174         gpmc_mem_init();
2175
2176         rc = gpmc_setup_irq(gpmc);
2177         if (rc) {
2178                 dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
2179                 goto fail;
2180         }
2181
2182         if (!pdev->dev.of_node) {
2183                 gpmc_cs_num      = GPMC_CS_NUM;
2184                 gpmc_nr_waitpins = GPMC_NR_WAITPINS;
2185         }
2186
2187         rc = gpmc_probe_dt(pdev);
2188         if (rc < 0) {
2189                 dev_err(gpmc->dev, "failed to probe DT parameters\n");
2190                 gpmc_free_irq(gpmc);
2191                 goto fail;
2192         }
2193
2194         return 0;
2195
2196 fail:
2197         pm_runtime_put_sync(&pdev->dev);
2198         return rc;
2199 }
2200
2201 static int gpmc_remove(struct platform_device *pdev)
2202 {
2203         struct gpmc_device *gpmc = platform_get_drvdata(pdev);
2204
2205         gpmc_free_irq(gpmc);
2206         gpmc_mem_exit();
2207         pm_runtime_put_sync(&pdev->dev);
2208         pm_runtime_disable(&pdev->dev);
2209
2210         return 0;
2211 }
2212
2213 #ifdef CONFIG_PM_SLEEP
2214 static int gpmc_suspend(struct device *dev)
2215 {
2216         omap3_gpmc_save_context();
2217         pm_runtime_put_sync(dev);
2218         return 0;
2219 }
2220
2221 static int gpmc_resume(struct device *dev)
2222 {
2223         pm_runtime_get_sync(dev);
2224         omap3_gpmc_restore_context();
2225         return 0;
2226 }
2227 #endif
2228
2229 static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
2230
2231 static struct platform_driver gpmc_driver = {
2232         .probe          = gpmc_probe,
2233         .remove         = gpmc_remove,
2234         .driver         = {
2235                 .name   = DEVICE_NAME,
2236                 .of_match_table = of_match_ptr(gpmc_dt_ids),
2237                 .pm     = &gpmc_pm_ops,
2238         },
2239 };
2240
2241 static __init int gpmc_init(void)
2242 {
2243         return platform_driver_register(&gpmc_driver);
2244 }
2245
2246 static __exit void gpmc_exit(void)
2247 {
2248         platform_driver_unregister(&gpmc_driver);
2250 }
2251
2252 postcore_initcall(gpmc_init);
2253 module_exit(gpmc_exit);
2254
2255 static struct omap3_gpmc_regs gpmc_context;
2256
2257 void omap3_gpmc_save_context(void)
2258 {
2259         int i;
2260
2261         if (!gpmc_base)
2262                 return;
2263
2264         gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2265         gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2266         gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
2267         gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
2268         gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
2269         gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
2270         gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
2271         for (i = 0; i < gpmc_cs_num; i++) {
2272                 gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
2273                 if (gpmc_context.cs_context[i].is_valid) {
2274                         gpmc_context.cs_context[i].config1 =
2275                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
2276                         gpmc_context.cs_context[i].config2 =
2277                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
2278                         gpmc_context.cs_context[i].config3 =
2279                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
2280                         gpmc_context.cs_context[i].config4 =
2281                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
2282                         gpmc_context.cs_context[i].config5 =
2283                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
2284                         gpmc_context.cs_context[i].config6 =
2285                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
2286                         gpmc_context.cs_context[i].config7 =
2287                                 gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
2288                 }
2289         }
2290 }
2291
2292 void omap3_gpmc_restore_context(void)
2293 {
2294         int i;
2295
2296         if (!gpmc_base)
2297                 return;
2298
2299         gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2300         gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2301         gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
2302         gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
2303         gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
2304         gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
2305         gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
2306         for (i = 0; i < gpmc_cs_num; i++) {
2307                 if (gpmc_context.cs_context[i].is_valid) {
2308                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
2309                                 gpmc_context.cs_context[i].config1);
2310                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
2311                                 gpmc_context.cs_context[i].config2);
2312                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
2313                                 gpmc_context.cs_context[i].config3);
2314                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
2315                                 gpmc_context.cs_context[i].config4);
2316                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
2317                                 gpmc_context.cs_context[i].config5);
2318                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
2319                                 gpmc_context.cs_context[i].config6);
2320                         gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
2321                                 gpmc_context.cs_context[i].config7);
2322                 }
2323         }
2324 }