drivers/spi/spi-s3c64xx.c
1 /*
2  * Copyright (C) 2009 Samsung Electronics Ltd.
3  *      Jaswinder Singh <jassi.brar@samsung.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18  */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/workqueue.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <linux/clk.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi.h>
30
31 #include <mach/dma.h>
32 #include <plat/s3c64xx-spi.h>
33
34 /* Registers and bit-fields */
35
36 #define S3C64XX_SPI_CH_CFG              0x00
37 #define S3C64XX_SPI_CLK_CFG             0x04
38 #define S3C64XX_SPI_MODE_CFG    0x08
39 #define S3C64XX_SPI_SLAVE_SEL   0x0C
40 #define S3C64XX_SPI_INT_EN              0x10
41 #define S3C64XX_SPI_STATUS              0x14
42 #define S3C64XX_SPI_TX_DATA             0x18
43 #define S3C64XX_SPI_RX_DATA             0x1C
44 #define S3C64XX_SPI_PACKET_CNT  0x20
45 #define S3C64XX_SPI_PENDING_CLR 0x24
46 #define S3C64XX_SPI_SWAP_CFG    0x28
47 #define S3C64XX_SPI_FB_CLK              0x2C
48
49 #define S3C64XX_SPI_CH_HS_EN            (1<<6)  /* High Speed Enable */
50 #define S3C64XX_SPI_CH_SW_RST           (1<<5)
51 #define S3C64XX_SPI_CH_SLAVE            (1<<4)
52 #define S3C64XX_SPI_CPOL_L              (1<<3)
53 #define S3C64XX_SPI_CPHA_B              (1<<2)
54 #define S3C64XX_SPI_CH_RXCH_ON          (1<<1)
55 #define S3C64XX_SPI_CH_TXCH_ON          (1<<0)
56
57 #define S3C64XX_SPI_CLKSEL_SRCMSK       (3<<9)
58 #define S3C64XX_SPI_CLKSEL_SRCSHFT      9
59 #define S3C64XX_SPI_ENCLK_ENABLE        (1<<8)
60 #define S3C64XX_SPI_PSR_MASK            0xff
61
62 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE            (0<<29)
63 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD        (1<<29)
64 #define S3C64XX_SPI_MODE_CH_TSZ_WORD            (2<<29)
65 #define S3C64XX_SPI_MODE_CH_TSZ_MASK            (3<<29)
66 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE           (0<<17)
67 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD       (1<<17)
68 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD           (2<<17)
69 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK           (3<<17)
70 #define S3C64XX_SPI_MODE_RXDMA_ON               (1<<2)
71 #define S3C64XX_SPI_MODE_TXDMA_ON               (1<<1)
72 #define S3C64XX_SPI_MODE_4BURST                 (1<<0)
73
74 #define S3C64XX_SPI_SLAVE_AUTO                  (1<<1)
75 #define S3C64XX_SPI_SLAVE_SIG_INACT             (1<<0)
76
77 #define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
78
79 #define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
80                                         (c)->regs + S3C64XX_SPI_SLAVE_SEL)
81
82 #define S3C64XX_SPI_INT_TRAILING_EN             (1<<6)
83 #define S3C64XX_SPI_INT_RX_OVERRUN_EN           (1<<5)
84 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN          (1<<4)
85 #define S3C64XX_SPI_INT_TX_OVERRUN_EN           (1<<3)
86 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN          (1<<2)
87 #define S3C64XX_SPI_INT_RX_FIFORDY_EN           (1<<1)
88 #define S3C64XX_SPI_INT_TX_FIFORDY_EN           (1<<0)
89
90 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR           (1<<5)
91 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR  (1<<4)
92 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR           (1<<3)
93 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR  (1<<2)
94 #define S3C64XX_SPI_ST_RX_FIFORDY               (1<<1)
95 #define S3C64XX_SPI_ST_TX_FIFORDY               (1<<0)
96
97 #define S3C64XX_SPI_PACKET_CNT_EN               (1<<16)
98
99 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR         (1<<4)
100 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR          (1<<3)
101 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR         (1<<2)
102 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR          (1<<1)
103 #define S3C64XX_SPI_PND_TRAILING_CLR            (1<<0)
104
105 #define S3C64XX_SPI_SWAP_RX_HALF_WORD           (1<<7)
106 #define S3C64XX_SPI_SWAP_RX_BYTE                (1<<6)
107 #define S3C64XX_SPI_SWAP_RX_BIT                 (1<<5)
108 #define S3C64XX_SPI_SWAP_RX_EN                  (1<<4)
109 #define S3C64XX_SPI_SWAP_TX_HALF_WORD           (1<<3)
110 #define S3C64XX_SPI_SWAP_TX_BYTE                (1<<2)
111 #define S3C64XX_SPI_SWAP_TX_BIT                 (1<<1)
112 #define S3C64XX_SPI_SWAP_TX_EN                  (1<<0)
113
114 #define S3C64XX_SPI_FBCLK_MSK           (3<<0)
115
116 #define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
117                                         (((i)->fifo_lvl_mask + 1))) \
118                                         ? 1 : 0)
119
120 #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
121 #define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
122 #define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
123
124 #define S3C64XX_SPI_MAX_TRAILCNT        0x3ff
125 #define S3C64XX_SPI_TRAILCNT_OFF        19
126
127 #define S3C64XX_SPI_TRAILCNT            S3C64XX_SPI_MAX_TRAILCNT
128
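/*
 * Convert a millisecond timeout into an approximate number of busy-wait
 * loop iterations, derived from loops_per_jiffy.
 */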
129 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
130
131 #define RXBUSY    (1<<2)
132 #define TXBUSY    (1<<3)
133
134 struct s3c64xx_spi_dma_data {
135         unsigned                ch;
136         enum dma_data_direction direction;
137         enum dma_ch     dmach;
138 };
139
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Pointer to the controller's platform device.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @queue: To log SPI xfer requests.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: Runtime info of the DMA channel used for Rx.
 * @tx_dma: Runtime info of the DMA channel used for Tx.
 * @ops: DMA transfer operations provided by the Samsung DMA backend.
 */
160 struct s3c64xx_spi_driver_data {
161         void __iomem                    *regs;
162         struct clk                      *clk;
163         struct clk                      *src_clk;
164         struct platform_device          *pdev;
165         struct spi_master               *master;
166         struct s3c64xx_spi_info  *cntrlr_info;
167         struct spi_device               *tgl_spi;
168         struct list_head                queue;
169         spinlock_t                      lock;
170         unsigned long                   sfr_start;
171         struct completion               xfer_completion;
172         unsigned                        state;
173         unsigned                        cur_mode, cur_bpw;
174         unsigned                        cur_speed;
175         struct s3c64xx_spi_dma_data     rx_dma;
176         struct s3c64xx_spi_dma_data     tx_dma;
177         struct samsung_dma_ops          *ops;
178 };
179
180 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
181         .name = "samsung-spi-dma",
182 };
183
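/*
 * Soft-reset the SPI channel and drain any data left in the Tx/Rx FIFOs,
 * then leave the channel with the DMA and Tx/Rx paths disabled.
 */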
184 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
185 {
186         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
187         void __iomem *regs = sdd->regs;
188         unsigned long loops;
189         u32 val;
190
191         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
192
193         val = readl(regs + S3C64XX_SPI_CH_CFG);
194         val |= S3C64XX_SPI_CH_SW_RST;
195         val &= ~S3C64XX_SPI_CH_HS_EN;
196         writel(val, regs + S3C64XX_SPI_CH_CFG);
197
198         /* Flush TxFIFO*/
199         loops = msecs_to_loops(1);
200         do {
201                 val = readl(regs + S3C64XX_SPI_STATUS);
202         } while (TX_FIFO_LVL(val, sci) && loops--);
203
204         if (loops == 0)
205                 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
206
207         /* Flush RxFIFO*/
208         loops = msecs_to_loops(1);
209         do {
210                 val = readl(regs + S3C64XX_SPI_STATUS);
211                 if (RX_FIFO_LVL(val, sci))
212                         readl(regs + S3C64XX_SPI_RX_DATA);
213                 else
214                         break;
215         } while (loops--);
216
217         if (loops == 0)
218                 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
219
220         val = readl(regs + S3C64XX_SPI_CH_CFG);
221         val &= ~S3C64XX_SPI_CH_SW_RST;
222         writel(val, regs + S3C64XX_SPI_CH_CFG);
223
224         val = readl(regs + S3C64XX_SPI_MODE_CFG);
225         val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
226         writel(val, regs + S3C64XX_SPI_MODE_CFG);
227
228         val = readl(regs + S3C64XX_SPI_CH_CFG);
229         val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
230         writel(val, regs + S3C64XX_SPI_CH_CFG);
231 }
232
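/*
 * DMA completion callback: clear the busy flag for the finished direction
 * and complete the transfer once both Rx and Tx (where used) are done.
 */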
233 static void s3c64xx_spi_dmacb(void *data)
234 {
235         struct s3c64xx_spi_driver_data *sdd;
236         struct s3c64xx_spi_dma_data *dma = data;
237         unsigned long flags;
238
239         if (dma->direction == DMA_DEV_TO_MEM)
240                 sdd = container_of(data,
241                         struct s3c64xx_spi_driver_data, rx_dma);
242         else
243                 sdd = container_of(data,
244                         struct s3c64xx_spi_driver_data, tx_dma);
245
246         spin_lock_irqsave(&sdd->lock, flags);
247
248         if (dma->direction == DMA_DEV_TO_MEM) {
249                 sdd->state &= ~RXBUSY;
250                 if (!(sdd->state & TXBUSY))
251                         complete(&sdd->xfer_completion);
252         } else {
253                 sdd->state &= ~TXBUSY;
254                 if (!(sdd->state & RXBUSY))
255                         complete(&sdd->xfer_completion);
256         }
257
258         spin_unlock_irqrestore(&sdd->lock, flags);
259 }
260
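/*
 * Set up and start a single slave-DMA transfer of 'len' bytes to/from the
 * buffer at 'buf', using the channel described by 'dma'.
 */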
261 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
262                                         unsigned len, dma_addr_t buf)
263 {
264         struct s3c64xx_spi_driver_data *sdd;
265         struct samsung_dma_prep_info info;
266
267         if (dma->direction == DMA_DEV_TO_MEM)
268                 sdd = container_of((void *)dma,
269                         struct s3c64xx_spi_driver_data, rx_dma);
270         else
271                 sdd = container_of((void *)dma,
272                         struct s3c64xx_spi_driver_data, tx_dma);
273
274         info.cap = DMA_SLAVE;
275         info.len = len;
276         info.fp = s3c64xx_spi_dmacb;
277         info.fp_param = dma;
278         info.direction = dma->direction;
279         info.buf = buf;
280
281         sdd->ops->prepare(dma->ch, &info);
282         sdd->ops->trigger(dma->ch);
283 }
284
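/*
 * Request the Rx and Tx DMA channels from the Samsung DMA backend,
 * pointing them at the controller's Rx/Tx data registers.
 */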
285 static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
286 {
287         struct samsung_dma_info info;
288
289         sdd->ops = samsung_dma_get_ops();
290
291         info.cap = DMA_SLAVE;
292         info.client = &s3c64xx_spi_dma_client;
293         info.width = sdd->cur_bpw / 8;
294
295         info.direction = sdd->rx_dma.direction;
296         info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
297         sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
298         info.direction =  sdd->tx_dma.direction;
299         info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
300         sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
301
302         return 1;
303 }
304
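/*
 * Program the mode and channel registers for one transfer and start the
 * data path: either kick off DMA, or in polling mode stuff the Tx FIFO
 * directly from the buffer.
 */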
305 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
306                                 struct spi_device *spi,
307                                 struct spi_transfer *xfer, int dma_mode)
308 {
309         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
310         void __iomem *regs = sdd->regs;
311         u32 modecfg, chcfg;
312
313         modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
314         modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
315
316         chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
317         chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
318
319         if (dma_mode) {
320                 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
321         } else {
                /* Always shift data into the Rx FIFO, even if the xfer is
                 * Tx only; keeping the Rx channel on lets PCKT_CNT generate
                 * exactly as many clocks as needed.
                 */
326                 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
327                 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
328                                         | S3C64XX_SPI_PACKET_CNT_EN,
329                                         regs + S3C64XX_SPI_PACKET_CNT);
330         }
331
332         if (xfer->tx_buf != NULL) {
333                 sdd->state |= TXBUSY;
334                 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
335                 if (dma_mode) {
336                         modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
337                         prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
338                 } else {
339                         switch (sdd->cur_bpw) {
340                         case 32:
341                                 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
342                                         xfer->tx_buf, xfer->len / 4);
343                                 break;
344                         case 16:
345                                 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
346                                         xfer->tx_buf, xfer->len / 2);
347                                 break;
348                         default:
349                                 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
350                                         xfer->tx_buf, xfer->len);
351                                 break;
352                         }
353                 }
354         }
355
356         if (xfer->rx_buf != NULL) {
357                 sdd->state |= RXBUSY;
358
359                 if (sci->high_speed && sdd->cur_speed >= 30000000UL
360                                         && !(sdd->cur_mode & SPI_CPHA))
361                         chcfg |= S3C64XX_SPI_CH_HS_EN;
362
363                 if (dma_mode) {
364                         modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
365                         chcfg |= S3C64XX_SPI_CH_RXCH_ON;
366                         writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
367                                         | S3C64XX_SPI_PACKET_CNT_EN,
368                                         regs + S3C64XX_SPI_PACKET_CNT);
369                         prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
370                 }
371         }
372
373         writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
374         writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
375 }
376
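/*
 * Assert the chip-select for 'spi' via its board-supplied set_level()
 * callback, first deselecting a previously toggled device if it differs.
 */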
377 static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
378                                                 struct spi_device *spi)
379 {
380         struct s3c64xx_spi_csinfo *cs;
381
382         if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
383                 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
384                         /* Deselect the last toggled device */
385                         cs = sdd->tgl_spi->controller_data;
386                         cs->set_level(cs->line,
387                                         spi->mode & SPI_CS_HIGH ? 0 : 1);
388                 }
389                 sdd->tgl_spi = NULL;
390         }
391
392         cs = spi->controller_data;
393         cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
394 }
395
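/*
 * Wait for one transfer to finish: block on the DMA completion in DMA mode,
 * or poll the status register in PIO mode and then drain the Rx FIFO.
 * Returns 0 on success or -EIO on timeout.
 */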
396 static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
397                                 struct spi_transfer *xfer, int dma_mode)
398 {
399         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
400         void __iomem *regs = sdd->regs;
401         unsigned long val;
402         int ms;
403
404         /* millisecs to xfer 'len' bytes @ 'cur_speed' */
405         ms = xfer->len * 8 * 1000 / sdd->cur_speed;
406         ms += 10; /* some tolerance */
407
408         if (dma_mode) {
409                 val = msecs_to_jiffies(ms) + 10;
410                 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
411         } else {
412                 u32 status;
413                 val = msecs_to_loops(ms);
414                 do {
415                         status = readl(regs + S3C64XX_SPI_STATUS);
416                 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
417         }
418
419         if (!val)
420                 return -EIO;
421
422         if (dma_mode) {
423                 u32 status;
424
                /*
                 * DMA Tx completes as soon as the data is written to the
                 * FIFO, without waiting for the actual transmission on the
                 * bus to finish. DMA Rx completes only after the data has
                 * been read out of the FIFO, which requires the bus
                 * transmission to finish, so no extra wait is needed when
                 * the xfer involved Rx (with or without Tx).
                 */
432                 if (xfer->rx_buf == NULL) {
433                         val = msecs_to_loops(10);
434                         status = readl(regs + S3C64XX_SPI_STATUS);
435                         while ((TX_FIFO_LVL(status, sci)
436                                 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
437                                         && --val) {
438                                 cpu_relax();
439                                 status = readl(regs + S3C64XX_SPI_STATUS);
440                         }
441
442                         if (!val)
443                                 return -EIO;
444                 }
445         } else {
446                 /* If it was only Tx */
447                 if (xfer->rx_buf == NULL) {
448                         sdd->state &= ~TXBUSY;
449                         return 0;
450                 }
451
452                 switch (sdd->cur_bpw) {
453                 case 32:
454                         ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
455                                 xfer->rx_buf, xfer->len / 4);
456                         break;
457                 case 16:
458                         ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
459                                 xfer->rx_buf, xfer->len / 2);
460                         break;
461                 default:
462                         ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
463                                 xfer->rx_buf, xfer->len);
464                         break;
465                 }
466                 sdd->state &= ~RXBUSY;
467         }
468
469         return 0;
470 }
471
472 static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
473                                                 struct spi_device *spi)
474 {
475         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
476
477         if (sdd->tgl_spi == spi)
478                 sdd->tgl_spi = NULL;
479
480         cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
481 }
482
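/*
 * Apply the cached mode, bits-per-word and speed settings (sdd->cur_*)
 * to the controller registers and reprogram the clock accordingly.
 */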
483 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
484 {
485         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
486         void __iomem *regs = sdd->regs;
487         u32 val;
488
489         /* Disable Clock */
490         if (sci->clk_from_cmu) {
491                 clk_disable(sdd->src_clk);
492         } else {
493                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
494                 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
495                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
496         }
497
498         /* Set Polarity and Phase */
499         val = readl(regs + S3C64XX_SPI_CH_CFG);
500         val &= ~(S3C64XX_SPI_CH_SLAVE |
501                         S3C64XX_SPI_CPOL_L |
502                         S3C64XX_SPI_CPHA_B);
503
504         if (sdd->cur_mode & SPI_CPOL)
505                 val |= S3C64XX_SPI_CPOL_L;
506
507         if (sdd->cur_mode & SPI_CPHA)
508                 val |= S3C64XX_SPI_CPHA_B;
509
510         writel(val, regs + S3C64XX_SPI_CH_CFG);
511
512         /* Set Channel & DMA Mode */
513         val = readl(regs + S3C64XX_SPI_MODE_CFG);
514         val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
515                         | S3C64XX_SPI_MODE_CH_TSZ_MASK);
516
517         switch (sdd->cur_bpw) {
518         case 32:
519                 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
520                 val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
521                 break;
522         case 16:
523                 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
524                 val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
525                 break;
526         default:
527                 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
528                 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
529                 break;
530         }
531
532         writel(val, regs + S3C64XX_SPI_MODE_CFG);
533
534         if (sci->clk_from_cmu) {
535                 /* Configure Clock */
536                 /* There is half-multiplier before the SPI */
537                 clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
538                 /* Enable Clock */
539                 clk_enable(sdd->src_clk);
540         } else {
541                 /* Configure Clock */
542                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
543                 val &= ~S3C64XX_SPI_PSR_MASK;
544                 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
545                                 & S3C64XX_SPI_PSR_MASK);
546                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
547
548                 /* Enable Clock */
549                 val = readl(regs + S3C64XX_SPI_CLK_CFG);
550                 val |= S3C64XX_SPI_ENCLK_ENABLE;
551                 writel(val, regs + S3C64XX_SPI_CLK_CFG);
552         }
553 }
554
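/* Sentinel marking a transfer buffer as not DMA-mapped. */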
555 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
556
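/*
 * DMA-map the buffers of every transfer in the message that is large
 * enough to be done by DMA; transfers small enough for FIFO polling are
 * left unmapped.
 */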
557 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
558                                                 struct spi_message *msg)
559 {
560         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
561         struct device *dev = &sdd->pdev->dev;
562         struct spi_transfer *xfer;
563
564         if (msg->is_dma_mapped)
565                 return 0;
566
567         /* First mark all xfer unmapped */
568         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
569                 xfer->rx_dma = XFER_DMAADDR_INVALID;
570                 xfer->tx_dma = XFER_DMAADDR_INVALID;
571         }
572
573         /* Map until end or first fail */
574         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
575
576                 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
577                         continue;
578
579                 if (xfer->tx_buf != NULL) {
580                         xfer->tx_dma = dma_map_single(dev,
581                                         (void *)xfer->tx_buf, xfer->len,
582                                         DMA_TO_DEVICE);
583                         if (dma_mapping_error(dev, xfer->tx_dma)) {
584                                 dev_err(dev, "dma_map_single Tx failed\n");
585                                 xfer->tx_dma = XFER_DMAADDR_INVALID;
586                                 return -ENOMEM;
587                         }
588                 }
589
590                 if (xfer->rx_buf != NULL) {
591                         xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
592                                                 xfer->len, DMA_FROM_DEVICE);
593                         if (dma_mapping_error(dev, xfer->rx_dma)) {
594                                 dev_err(dev, "dma_map_single Rx failed\n");
595                                 dma_unmap_single(dev, xfer->tx_dma,
596                                                 xfer->len, DMA_TO_DEVICE);
597                                 xfer->tx_dma = XFER_DMAADDR_INVALID;
598                                 xfer->rx_dma = XFER_DMAADDR_INVALID;
599                                 return -ENOMEM;
600                         }
601                 }
602         }
603
604         return 0;
605 }
606
607 static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
608                                                 struct spi_message *msg)
609 {
610         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
611         struct device *dev = &sdd->pdev->dev;
612         struct spi_transfer *xfer;
613
614         if (msg->is_dma_mapped)
615                 return;
616
617         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
618
619                 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
620                         continue;
621
622                 if (xfer->rx_buf != NULL
623                                 && xfer->rx_dma != XFER_DMAADDR_INVALID)
624                         dma_unmap_single(dev, xfer->rx_dma,
625                                                 xfer->len, DMA_FROM_DEVICE);
626
627                 if (xfer->tx_buf != NULL
628                                 && xfer->tx_dma != XFER_DMAADDR_INVALID)
629                         dma_unmap_single(dev, xfer->tx_dma,
630                                                 xfer->len, DMA_TO_DEVICE);
631         }
632 }
633
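/*
 * spi_master transfer_one_message() callback: reconfigure the controller if
 * the device's settings changed, then run each transfer either by DMA or by
 * FIFO polling, handling chip-select and cs_change semantics along the way.
 */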
634 static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
635                                             struct spi_message *msg)
636 {
637         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
638         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
639         struct spi_device *spi = msg->spi;
640         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
641         struct spi_transfer *xfer;
642         int status = 0, cs_toggle = 0;
643         u32 speed;
644         u8 bpw;
645
        /* If the controller (master) state differs from what the slave device needs */
647         if (sdd->cur_speed != spi->max_speed_hz
648                         || sdd->cur_mode != spi->mode
649                         || sdd->cur_bpw != spi->bits_per_word) {
650                 sdd->cur_bpw = spi->bits_per_word;
651                 sdd->cur_speed = spi->max_speed_hz;
652                 sdd->cur_mode = spi->mode;
653                 s3c64xx_spi_config(sdd);
654         }
655
656         /* Map all the transfers if needed */
657         if (s3c64xx_spi_map_mssg(sdd, msg)) {
658                 dev_err(&spi->dev,
659                         "Xfer: Unable to map message buffers!\n");
660                 status = -ENOMEM;
661                 goto out;
662         }
663
664         /* Configure feedback delay */
665         writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
666
667         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
668
669                 unsigned long flags;
670                 int use_dma;
671
672                 INIT_COMPLETION(sdd->xfer_completion);
673
674                 /* Only BPW and Speed may change across transfers */
675                 bpw = xfer->bits_per_word ? : spi->bits_per_word;
676                 speed = xfer->speed_hz ? : spi->max_speed_hz;
677
678                 if (xfer->len % (bpw / 8)) {
679                         dev_err(&spi->dev,
680                                 "Xfer length(%u) not a multiple of word size(%u)\n",
681                                 xfer->len, bpw / 8);
682                         status = -EIO;
683                         goto out;
684                 }
685
686                 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
687                         sdd->cur_bpw = bpw;
688                         sdd->cur_speed = speed;
689                         s3c64xx_spi_config(sdd);
690                 }
691
692                 /* Polling method for xfers not bigger than FIFO capacity */
693                 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
694                         use_dma = 0;
695                 else
696                         use_dma = 1;
697
698                 spin_lock_irqsave(&sdd->lock, flags);
699
                /* Clear busy flags; enable_datapath() marks only the directions in use */
701                 sdd->state &= ~RXBUSY;
702                 sdd->state &= ~TXBUSY;
703
704                 enable_datapath(sdd, spi, xfer, use_dma);
705
706                 /* Slave Select */
707                 enable_cs(sdd, spi);
708
709                 /* Start the signals */
710                 S3C64XX_SPI_ACT(sdd);
711
712                 spin_unlock_irqrestore(&sdd->lock, flags);
713
714                 status = wait_for_xfer(sdd, xfer, use_dma);
715
                /* Quiesce the signals */
717                 S3C64XX_SPI_DEACT(sdd);
718
719                 if (status) {
                        dev_err(&spi->dev,
                                "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
722                                 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
723                                 (sdd->state & RXBUSY) ? 'f' : 'p',
724                                 (sdd->state & TXBUSY) ? 'f' : 'p',
725                                 xfer->len);
726
727                         if (use_dma) {
728                                 if (xfer->tx_buf != NULL
729                                                 && (sdd->state & TXBUSY))
730                                         sdd->ops->stop(sdd->tx_dma.ch);
731                                 if (xfer->rx_buf != NULL
732                                                 && (sdd->state & RXBUSY))
733                                         sdd->ops->stop(sdd->rx_dma.ch);
734                         }
735
736                         goto out;
737                 }
738
739                 if (xfer->delay_usecs)
740                         udelay(xfer->delay_usecs);
741
742                 if (xfer->cs_change) {
                        /* Hint that the next message will be
                           for the same device */
745                         if (list_is_last(&xfer->transfer_list,
746                                                 &msg->transfers))
747                                 cs_toggle = 1;
748                         else
749                                 disable_cs(sdd, spi);
750                 }
751
752                 msg->actual_length += xfer->len;
753
754                 flush_fifo(sdd);
755         }
756
757 out:
758         if (!cs_toggle || status)
759                 disable_cs(sdd, spi);
760         else
761                 sdd->tgl_spi = spi;
762
763         s3c64xx_spi_unmap_mssg(sdd, msg);
764
765         msg->status = status;
766
767         spi_finalize_current_message(master);
768
769         return 0;
770 }
771
772 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
773 {
774         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
775
776         /* Acquire DMA channels */
777         while (!acquire_dma(sdd))
778                 msleep(10);
779
780         pm_runtime_get_sync(&sdd->pdev->dev);
781
782         return 0;
783 }
784
785 static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
786 {
787         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
788
789         /* Free DMA channels */
790         sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
791         sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
792
793         pm_runtime_put(&sdd->pdev->dev);
794
795         return 0;
796 }
797
798 /*
799  * Here we only check the validity of requested configuration
800  * and save the configuration in a local data-structure.
801  * The controller is actually configured only just before we
802  * get a message to transfer.
803  */
804 static int s3c64xx_spi_setup(struct spi_device *spi)
805 {
806         struct s3c64xx_spi_csinfo *cs = spi->controller_data;
807         struct s3c64xx_spi_driver_data *sdd;
808         struct s3c64xx_spi_info *sci;
809         struct spi_message *msg;
810         unsigned long flags;
811         int err = 0;
812
813         if (cs == NULL || cs->set_level == NULL) {
814                 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
815                 return -ENODEV;
816         }
817
818         sdd = spi_master_get_devdata(spi->master);
819         sci = sdd->cntrlr_info;
820
821         spin_lock_irqsave(&sdd->lock, flags);
822
823         list_for_each_entry(msg, &sdd->queue, queue) {
                /* Check if a message is already queued for this device */
825                 if (msg->spi == spi) {
826                         dev_err(&spi->dev,
827                                 "setup: attempt while mssg in queue!\n");
828                         spin_unlock_irqrestore(&sdd->lock, flags);
829                         return -EBUSY;
830                 }
831         }
832
833         spin_unlock_irqrestore(&sdd->lock, flags);
834
835         if (spi->bits_per_word != 8
836                         && spi->bits_per_word != 16
837                         && spi->bits_per_word != 32) {
838                 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
839                                                         spi->bits_per_word);
840                 err = -EINVAL;
841                 goto setup_exit;
842         }
843
844         pm_runtime_get_sync(&sdd->pdev->dev);
845
846         /* Check if we can provide the requested rate */
847         if (!sci->clk_from_cmu) {
848                 u32 psr, speed;
849
850                 /* Max possible */
851                 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
852
853                 if (spi->max_speed_hz > speed)
854                         spi->max_speed_hz = speed;
855
856                 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
857                 psr &= S3C64XX_SPI_PSR_MASK;
858                 if (psr == S3C64XX_SPI_PSR_MASK)
859                         psr--;
860
861                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
862                 if (spi->max_speed_hz < speed) {
863                         if (psr+1 < S3C64XX_SPI_PSR_MASK) {
864                                 psr++;
865                         } else {
866                                 err = -EINVAL;
867                                 goto setup_exit;
868                         }
869                 }
870
871                 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
872                 if (spi->max_speed_hz >= speed)
873                         spi->max_speed_hz = speed;
874                 else
875                         err = -EINVAL;
876         }
877
878         pm_runtime_put(&sdd->pdev->dev);
879
880 setup_exit:
881
882         /* setup() returns with device de-selected */
883         disable_cs(sdd, spi);
884
885         return err;
886 }
887
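/*
 * Error interrupt handler: acknowledge and report any FIFO overrun or
 * underrun conditions flagged by the controller.
 */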
888 static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
889 {
890         struct s3c64xx_spi_driver_data *sdd = data;
891         struct spi_master *spi = sdd->master;
892         unsigned int val;
893
894         val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
895
896         val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
897                 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
898                 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
899                 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
900
901         writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
902
903         if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
904                 dev_err(&spi->dev, "RX overrun\n");
905         if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
906                 dev_err(&spi->dev, "RX underrun\n");
907         if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
908                 dev_err(&spi->dev, "TX overrun\n");
909         if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
910                 dev_err(&spi->dev, "TX underrun\n");
911
912         return IRQ_HANDLED;
913 }
914
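/*
 * Reset the controller to a known state: chip-select deasserted, interrupts
 * off, mode and packet-count registers cleared, pending flags acknowledged
 * and FIFOs flushed.
 */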
915 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
916 {
917         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
918         void __iomem *regs = sdd->regs;
919         unsigned int val;
920
921         sdd->cur_speed = 0;
922
923         S3C64XX_SPI_DEACT(sdd);
924
925         /* Disable Interrupts - we use Polling if not DMA mode */
926         writel(0, regs + S3C64XX_SPI_INT_EN);
927
928         if (!sci->clk_from_cmu)
929                 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
930                                 regs + S3C64XX_SPI_CLK_CFG);
931         writel(0, regs + S3C64XX_SPI_MODE_CFG);
932         writel(0, regs + S3C64XX_SPI_PACKET_CNT);
933
934         /* Clear any irq pending bits */
935         writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
936                                 regs + S3C64XX_SPI_PENDING_CLR);
937
938         writel(0, regs + S3C64XX_SPI_SWAP_CFG);
939
940         val = readl(regs + S3C64XX_SPI_MODE_CFG);
941         val &= ~S3C64XX_SPI_MODE_4BURST;
942         val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
943         val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
944         writel(val, regs + S3C64XX_SPI_MODE_CFG);
945
946         flush_fifo(sdd);
947 }
948
949 static int __init s3c64xx_spi_probe(struct platform_device *pdev)
950 {
951         struct resource *mem_res, *dmatx_res, *dmarx_res;
952         struct s3c64xx_spi_driver_data *sdd;
953         struct s3c64xx_spi_info *sci;
954         struct spi_master *master;
955         int ret, irq;
956         char clk_name[16];
957
958         if (pdev->id < 0) {
959                 dev_err(&pdev->dev,
960                                 "Invalid platform device id-%d\n", pdev->id);
961                 return -ENODEV;
962         }
963
964         if (pdev->dev.platform_data == NULL) {
965                 dev_err(&pdev->dev, "platform_data missing!\n");
966                 return -ENODEV;
967         }
968
969         sci = pdev->dev.platform_data;
970
        /* Check for availability of necessary resources */
972
973         dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
974         if (dmatx_res == NULL) {
975                 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
976                 return -ENXIO;
977         }
978
979         dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
980         if (dmarx_res == NULL) {
981                 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
982                 return -ENXIO;
983         }
984
985         mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
986         if (mem_res == NULL) {
987                 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
988                 return -ENXIO;
989         }
990
991         irq = platform_get_irq(pdev, 0);
992         if (irq < 0) {
993                 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
994                 return irq;
995         }
996
997         master = spi_alloc_master(&pdev->dev,
998                                 sizeof(struct s3c64xx_spi_driver_data));
999         if (master == NULL) {
1000                 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1001                 return -ENOMEM;
1002         }
1003
1004         platform_set_drvdata(pdev, master);
1005
1006         sdd = spi_master_get_devdata(master);
1007         sdd->master = master;
1008         sdd->cntrlr_info = sci;
1009         sdd->pdev = pdev;
1010         sdd->sfr_start = mem_res->start;
1011         sdd->tx_dma.dmach = dmatx_res->start;
1012         sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1013         sdd->rx_dma.dmach = dmarx_res->start;
1014         sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1015
1016         sdd->cur_bpw = 8;
1017
1018         master->bus_num = pdev->id;
1019         master->setup = s3c64xx_spi_setup;
1020         master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1021         master->transfer_one_message = s3c64xx_spi_transfer_one_message;
1022         master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1023         master->num_chipselect = sci->num_cs;
1024         master->dma_alignment = 8;
1025         /* the spi->mode bits understood by this driver: */
1026         master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1027
1028         if (request_mem_region(mem_res->start,
1029                         resource_size(mem_res), pdev->name) == NULL) {
1030                 dev_err(&pdev->dev, "Req mem region failed\n");
1031                 ret = -ENXIO;
1032                 goto err0;
1033         }
1034
1035         sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
1036         if (sdd->regs == NULL) {
1037                 dev_err(&pdev->dev, "Unable to remap IO\n");
1038                 ret = -ENXIO;
1039                 goto err1;
1040         }
1041
1042         if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
1043                 dev_err(&pdev->dev, "Unable to config gpio\n");
1044                 ret = -EBUSY;
1045                 goto err2;
1046         }
1047
1048         /* Setup clocks */
1049         sdd->clk = clk_get(&pdev->dev, "spi");
1050         if (IS_ERR(sdd->clk)) {
1051                 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1052                 ret = PTR_ERR(sdd->clk);
1053                 goto err3;
1054         }
1055
1056         if (clk_enable(sdd->clk)) {
1057                 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1058                 ret = -EBUSY;
1059                 goto err4;
1060         }
1061
1062         sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1063         sdd->src_clk = clk_get(&pdev->dev, clk_name);
1064         if (IS_ERR(sdd->src_clk)) {
1065                 dev_err(&pdev->dev,
1066                         "Unable to acquire clock '%s'\n", clk_name);
1067                 ret = PTR_ERR(sdd->src_clk);
1068                 goto err5;
1069         }
1070
1071         if (clk_enable(sdd->src_clk)) {
1072                 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1073                 ret = -EBUSY;
1074                 goto err6;
1075         }
1076
        /* Set up default mode */
1078         s3c64xx_spi_hwinit(sdd, pdev->id);
1079
1080         spin_lock_init(&sdd->lock);
1081         init_completion(&sdd->xfer_completion);
1082         INIT_LIST_HEAD(&sdd->queue);
1083
1084         ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
1085         if (ret != 0) {
1086                 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1087                         irq, ret);
1088                 goto err7;
1089         }
1090
1091         writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1092                S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1093                sdd->regs + S3C64XX_SPI_INT_EN);
1094
1095         if (spi_register_master(master)) {
1096                 dev_err(&pdev->dev, "cannot register SPI master\n");
1097                 ret = -EBUSY;
1098                 goto err8;
1099         }
1100
        dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
                                        pdev->id, master->num_chipselect);
        dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
                                        mem_res->start, mem_res->end,
                                        sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1107
1108         pm_runtime_enable(&pdev->dev);
1109
1110         return 0;
1111
1112 err8:
1113         free_irq(irq, sdd);
1114 err7:
1115         clk_disable(sdd->src_clk);
1116 err6:
1117         clk_put(sdd->src_clk);
1118 err5:
1119         clk_disable(sdd->clk);
1120 err4:
1121         clk_put(sdd->clk);
1122 err3:
1123 err2:
1124         iounmap((void *) sdd->regs);
1125 err1:
1126         release_mem_region(mem_res->start, resource_size(mem_res));
1127 err0:
1128         platform_set_drvdata(pdev, NULL);
1129         spi_master_put(master);
1130
1131         return ret;
1132 }
1133
1134 static int s3c64xx_spi_remove(struct platform_device *pdev)
1135 {
1136         struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1137         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1138         struct resource *mem_res;
1139
1140         pm_runtime_disable(&pdev->dev);
1141
1142         spi_unregister_master(master);
1143
1144         writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1145
1146         free_irq(platform_get_irq(pdev, 0), sdd);
1147
1148         clk_disable(sdd->src_clk);
1149         clk_put(sdd->src_clk);
1150
1151         clk_disable(sdd->clk);
1152         clk_put(sdd->clk);
1153
1154         iounmap((void *) sdd->regs);
1155
1156         mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1157         if (mem_res != NULL)
1158                 release_mem_region(mem_res->start, resource_size(mem_res));
1159
1160         platform_set_drvdata(pdev, NULL);
1161         spi_master_put(master);
1162
1163         return 0;
1164 }
1165
1166 #ifdef CONFIG_PM
1167 static int s3c64xx_spi_suspend(struct device *dev)
1168 {
1169         struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1170         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1171
1172         spi_master_suspend(master);
1173
1174         /* Disable the clock */
1175         clk_disable(sdd->src_clk);
1176         clk_disable(sdd->clk);
1177
1178         sdd->cur_speed = 0; /* Output Clock is stopped */
1179
1180         return 0;
1181 }
1182
1183 static int s3c64xx_spi_resume(struct device *dev)
1184 {
1185         struct platform_device *pdev = to_platform_device(dev);
1186         struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1187         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1188         struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1189
1190         sci->cfg_gpio(pdev);
1191
1192         /* Enable the clock */
1193         clk_enable(sdd->src_clk);
1194         clk_enable(sdd->clk);
1195
1196         s3c64xx_spi_hwinit(sdd, pdev->id);
1197
1198         spi_master_resume(master);
1199
1200         return 0;
1201 }
1202 #endif /* CONFIG_PM */
1203
1204 #ifdef CONFIG_PM_RUNTIME
1205 static int s3c64xx_spi_runtime_suspend(struct device *dev)
1206 {
1207         struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1208         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1209
1210         clk_disable(sdd->clk);
1211         clk_disable(sdd->src_clk);
1212
1213         return 0;
1214 }
1215
1216 static int s3c64xx_spi_runtime_resume(struct device *dev)
1217 {
1218         struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1219         struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1220
1221         clk_enable(sdd->src_clk);
1222         clk_enable(sdd->clk);
1223
1224         return 0;
1225 }
1226 #endif /* CONFIG_PM_RUNTIME */
1227
1228 static const struct dev_pm_ops s3c64xx_spi_pm = {
1229         SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1230         SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1231                            s3c64xx_spi_runtime_resume, NULL)
1232 };
1233
1234 static struct platform_driver s3c64xx_spi_driver = {
1235         .driver = {
1236                 .name   = "s3c64xx-spi",
1237                 .owner = THIS_MODULE,
1238                 .pm = &s3c64xx_spi_pm,
1239         },
1240         .remove = s3c64xx_spi_remove,
1241 };
1242 MODULE_ALIAS("platform:s3c64xx-spi");
1243
1244 static int __init s3c64xx_spi_init(void)
1245 {
1246         return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1247 }
1248 subsys_initcall(s3c64xx_spi_init);
1249
1250 static void __exit s3c64xx_spi_exit(void)
1251 {
1252         platform_driver_unregister(&s3c64xx_spi_driver);
1253 }
1254 module_exit(s3c64xx_spi_exit);
1255
1256 MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1257 MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1258 MODULE_LICENSE("GPL");