serial: imx: reorder functions and simplify a bit
[cascardo/linux.git] drivers/tty/serial/imx.c
1 /*
2  * Driver for Motorola/Freescale IMX serial ports
3  *
4  * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5  *
6  * Author: Sascha Hauer <sascha@saschahauer.de>
7  * Copyright (C) 2004 Pengutronix
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ
22 #endif
23
24 #include <linux/module.h>
25 #include <linux/ioport.h>
26 #include <linux/init.h>
27 #include <linux/console.h>
28 #include <linux/sysrq.h>
29 #include <linux/platform_device.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial_core.h>
33 #include <linux/serial.h>
34 #include <linux/clk.h>
35 #include <linux/delay.h>
36 #include <linux/rational.h>
37 #include <linux/slab.h>
38 #include <linux/of.h>
39 #include <linux/of_device.h>
40 #include <linux/io.h>
41 #include <linux/dma-mapping.h>
42
43 #include <asm/irq.h>
44 #include <linux/platform_data/serial-imx.h>
45 #include <linux/platform_data/dma-imx.h>
46
47 /* Register definitions */
48 #define URXD0 0x0  /* Receiver Register */
49 #define URTX0 0x40 /* Transmitter Register */
50 #define UCR1  0x80 /* Control Register 1 */
51 #define UCR2  0x84 /* Control Register 2 */
52 #define UCR3  0x88 /* Control Register 3 */
53 #define UCR4  0x8c /* Control Register 4 */
54 #define UFCR  0x90 /* FIFO Control Register */
55 #define USR1  0x94 /* Status Register 1 */
56 #define USR2  0x98 /* Status Register 2 */
57 #define UESC  0x9c /* Escape Character Register */
58 #define UTIM  0xa0 /* Escape Timer Register */
59 #define UBIR  0xa4 /* BRM Incremental Register */
60 #define UBMR  0xa8 /* BRM Modulator Register */
61 #define UBRC  0xac /* Baud Rate Count Register */
62 #define IMX21_ONEMS 0xb0 /* One Millisecond register */
63 #define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
64 #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
65
66 /* UART Control Register Bit Fields.*/
67 #define URXD_DUMMY_READ (1<<16)
68 #define URXD_CHARRDY    (1<<15)
69 #define URXD_ERR        (1<<14)
70 #define URXD_OVRRUN     (1<<13)
71 #define URXD_FRMERR     (1<<12)
72 #define URXD_BRK        (1<<11)
73 #define URXD_PRERR      (1<<10)
74 #define URXD_RX_DATA    (0xFF<<0)
75 #define UCR1_ADEN       (1<<15) /* Auto detect interrupt */
76 #define UCR1_ADBR       (1<<14) /* Auto detect baud rate */
77 #define UCR1_TRDYEN     (1<<13) /* Transmitter ready interrupt enable */
78 #define UCR1_IDEN       (1<<12) /* Idle condition interrupt */
79 #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
80 #define UCR1_RRDYEN     (1<<9)  /* Recv ready interrupt enable */
81 #define UCR1_RDMAEN     (1<<8)  /* Recv ready DMA enable */
82 #define UCR1_IREN       (1<<7)  /* Infrared interface enable */
83 #define UCR1_TXMPTYEN   (1<<6)  /* Transmitter empty interrupt enable */
84 #define UCR1_RTSDEN     (1<<5)  /* RTS delta interrupt enable */
85 #define UCR1_SNDBRK     (1<<4)  /* Send break */
86 #define UCR1_TDMAEN     (1<<3)  /* Transmitter ready DMA enable */
87 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
88 #define UCR1_ATDMAEN    (1<<2)  /* Aging DMA Timer Enable */
89 #define UCR1_DOZE       (1<<1)  /* Doze */
90 #define UCR1_UARTEN     (1<<0)  /* UART enabled */
91 #define UCR2_ESCI       (1<<15) /* Escape seq interrupt enable */
92 #define UCR2_IRTS       (1<<14) /* Ignore RTS pin */
93 #define UCR2_CTSC       (1<<13) /* CTS pin control */
94 #define UCR2_CTS        (1<<12) /* Clear to send */
95 #define UCR2_ESCEN      (1<<11) /* Escape enable */
96 #define UCR2_PREN       (1<<8)  /* Parity enable */
97 #define UCR2_PROE       (1<<7)  /* Parity odd/even */
98 #define UCR2_STPB       (1<<6)  /* Stop */
99 #define UCR2_WS         (1<<5)  /* Word size */
100 #define UCR2_RTSEN      (1<<4)  /* Request to send interrupt enable */
101 #define UCR2_ATEN       (1<<3)  /* Aging Timer Enable */
102 #define UCR2_TXEN       (1<<2)  /* Transmitter enabled */
103 #define UCR2_RXEN       (1<<1)  /* Receiver enabled */
104 #define UCR2_SRST       (1<<0)  /* SW reset */
105 #define UCR3_DTREN      (1<<13) /* DTR interrupt enable */
106 #define UCR3_PARERREN   (1<<12) /* Parity error interrupt enable */
107 #define UCR3_FRAERREN   (1<<11) /* Frame error interrupt enable */
108 #define UCR3_DSR        (1<<10) /* Data set ready */
109 #define UCR3_DCD        (1<<9)  /* Data carrier detect */
110 #define UCR3_RI         (1<<8)  /* Ring indicator */
111 #define UCR3_ADNIMP     (1<<7)  /* Autobaud Detection Not Improved */
112 #define UCR3_RXDSEN     (1<<6)  /* Receive status interrupt enable */
113 #define UCR3_AIRINTEN   (1<<5)  /* Async IR wake interrupt enable */
114 #define UCR3_AWAKEN     (1<<4)  /* Async wake interrupt enable */
115 #define IMX21_UCR3_RXDMUXSEL    (1<<2)  /* RXD Muxed Input Select */
116 #define UCR3_INVT       (1<<1)  /* Inverted Infrared transmission */
117 #define UCR3_BPEN       (1<<0)  /* Preset registers enable */
118 #define UCR4_CTSTL_SHF  10      /* CTS trigger level shift */
119 #define UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
120 #define UCR4_INVR       (1<<9)  /* Inverted infrared reception */
121 #define UCR4_ENIRI      (1<<8)  /* Serial infrared interrupt enable */
122 #define UCR4_WKEN       (1<<7)  /* Wake interrupt enable */
123 #define UCR4_REF16      (1<<6)  /* Ref freq 16 MHz */
124 #define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
125 #define UCR4_IRSC       (1<<5)  /* IR special case */
126 #define UCR4_TCEN       (1<<3)  /* Transmit complete interrupt enable */
127 #define UCR4_BKEN       (1<<2)  /* Break condition interrupt enable */
128 #define UCR4_OREN       (1<<1)  /* Receiver overrun interrupt enable */
129 #define UCR4_DREN       (1<<0)  /* Recv data ready interrupt enable */
130 #define UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
131 #define UFCR_DCEDTE     (1<<6)  /* DCE/DTE mode select */
132 #define UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
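/* produce the RFDIV field value for a reference divider of x (1..7) */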
133 #define UFCR_RFDIV_REG(x)       (((x) < 7 ? 6 - (x) : 6) << 7)
134 #define UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
135 #define USR1_PARITYERR  (1<<15) /* Parity error interrupt flag */
136 #define USR1_RTSS       (1<<14) /* RTS pin status */
137 #define USR1_TRDY       (1<<13) /* Transmitter ready interrupt/dma flag */
138 #define USR1_RTSD       (1<<12) /* RTS delta */
139 #define USR1_ESCF       (1<<11) /* Escape seq interrupt flag */
140 #define USR1_FRAMERR    (1<<10) /* Frame error interrupt flag */
141 #define USR1_RRDY       (1<<9)   /* Receiver ready interrupt/dma flag */
142 #define USR1_AGTIM      (1<<8)   /* Ageing timer interrupt flag */
143 #define USR1_TIMEOUT    (1<<7)   /* Receive timeout interrupt status */
144 #define USR1_RXDS        (1<<6)  /* Receiver idle interrupt flag */
145 #define USR1_AIRINT      (1<<5)  /* Async IR wake interrupt flag */
146 #define USR1_AWAKE       (1<<4)  /* Async wake interrupt flag */
147 #define USR2_ADET        (1<<15) /* Auto baud rate detect complete */
148 #define USR2_TXFE        (1<<14) /* Transmit buffer FIFO empty */
149 #define USR2_DTRF        (1<<13) /* DTR edge interrupt flag */
150 #define USR2_IDLE        (1<<12) /* Idle condition */
151 #define USR2_RIDELT      (1<<10) /* Ring Interrupt Delta */
152 #define USR2_RIIN        (1<<9)  /* Ring Indicator Input */
153 #define USR2_IRINT       (1<<8)  /* Serial infrared interrupt flag */
154 #define USR2_WAKE        (1<<7)  /* Wake */
155 #define USR2_DCDIN       (1<<5)  /* Data Carrier Detect Input */
156 #define USR2_RTSF        (1<<4)  /* RTS edge interrupt flag */
157 #define USR2_TXDC        (1<<3)  /* Transmitter complete */
158 #define USR2_BRCD        (1<<2)  /* Break condition */
159 #define USR2_ORE        (1<<1)   /* Overrun error */
160 #define USR2_RDR        (1<<0)   /* Recv data ready */
161 #define UTS_FRCPERR     (1<<13) /* Force parity error */
162 #define UTS_LOOP        (1<<12)  /* Loop tx and rx */
163 #define UTS_TXEMPTY      (1<<6)  /* TxFIFO empty */
164 #define UTS_RXEMPTY      (1<<5)  /* RxFIFO empty */
165 #define UTS_TXFULL       (1<<4)  /* TxFIFO full */
166 #define UTS_RXFULL       (1<<3)  /* RxFIFO full */
167 #define UTS_SOFTRST      (1<<0)  /* Software reset */
168
169 /* We've been assigned a range on the "Low-density serial ports" major */
170 #define SERIAL_IMX_MAJOR        207
171 #define MINOR_START             16
172 #define DEV_NAME                "ttymxc"
173
174 /*
175  * This determines how often we check the modem status signals
176  * for any change.  They generally aren't connected to an IRQ
177  * so we have to poll them.  We also check immediately before
178  * filling the TX fifo in case CTS has been dropped.
179  */
180 #define MCTRL_TIMEOUT   (250*HZ/1000)
181
182 #define DRIVER_NAME "IMX-uart"
183
184 #define UART_NR 8
185
186 /* i.MX21 type uart runs on all i.MX SoCs except i.MX1 and i.MX6q */
187 enum imx_uart_type {
188         IMX1_UART,
189         IMX21_UART,
190         IMX6Q_UART,
191 };
192
193 /* device type dependent stuff */
194 struct imx_uart_data {
195         unsigned uts_reg;
196         enum imx_uart_type devtype;
197 };
198
199 struct imx_port {
200         struct uart_port        port;
201         struct timer_list       timer;
202         unsigned int            old_status;
203         unsigned int            have_rtscts:1;
204         unsigned int            dte_mode:1;
205         unsigned int            irda_inv_rx:1;
206         unsigned int            irda_inv_tx:1;
207         unsigned short          trcv_delay; /* transceiver delay */
208         struct clk              *clk_ipg;
209         struct clk              *clk_per;
210         const struct imx_uart_data *devdata;
211
212         /* DMA fields */
213         unsigned int            dma_is_inited:1;
214         unsigned int            dma_is_enabled:1;
215         unsigned int            dma_is_rxing:1;
216         unsigned int            dma_is_txing:1;
217         struct dma_chan         *dma_chan_rx, *dma_chan_tx;
218         struct scatterlist      rx_sgl, tx_sgl[2];
219         void                    *rx_buf;
220         unsigned int            tx_bytes;
221         unsigned int            dma_tx_nents;
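        /* woken by the DMA completion callbacks; imx_shutdown() waits on it */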
222         wait_queue_head_t       dma_wait;
223         unsigned int            saved_reg[10];
224         bool                    context_saved;
225 };
226
227 struct imx_port_ucrs {
228         unsigned int    ucr1;
229         unsigned int    ucr2;
230         unsigned int    ucr3;
231 };
232
233 static struct imx_uart_data imx_uart_devdata[] = {
234         [IMX1_UART] = {
235                 .uts_reg = IMX1_UTS,
236                 .devtype = IMX1_UART,
237         },
238         [IMX21_UART] = {
239                 .uts_reg = IMX21_UTS,
240                 .devtype = IMX21_UART,
241         },
242         [IMX6Q_UART] = {
243                 .uts_reg = IMX21_UTS,
244                 .devtype = IMX6Q_UART,
245         },
246 };
247
248 static const struct platform_device_id imx_uart_devtype[] = {
249         {
250                 .name = "imx1-uart",
251                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
252         }, {
253                 .name = "imx21-uart",
254                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
255         }, {
256                 .name = "imx6q-uart",
257                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
258         }, {
259                 /* sentinel */
260         }
261 };
262 MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
263
264 static const struct of_device_id imx_uart_dt_ids[] = {
265         { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
266         { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
267         { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
268         { /* sentinel */ }
269 };
270 MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
271
272 static inline unsigned uts_reg(struct imx_port *sport)
273 {
274         return sport->devdata->uts_reg;
275 }
276
277 static inline int is_imx1_uart(struct imx_port *sport)
278 {
279         return sport->devdata->devtype == IMX1_UART;
280 }
281
282 static inline int is_imx21_uart(struct imx_port *sport)
283 {
284         return sport->devdata->devtype == IMX21_UART;
285 }
286
287 static inline int is_imx6q_uart(struct imx_port *sport)
288 {
289         return sport->devdata->devtype == IMX6Q_UART;
290 }
291 /*
292  * Save and restore functions for UCR1, UCR2 and UCR3 registers
293  */
294 #if defined(CONFIG_SERIAL_IMX_CONSOLE)
295 static void imx_port_ucrs_save(struct uart_port *port,
296                                struct imx_port_ucrs *ucr)
297 {
298         /* save control registers */
299         ucr->ucr1 = readl(port->membase + UCR1);
300         ucr->ucr2 = readl(port->membase + UCR2);
301         ucr->ucr3 = readl(port->membase + UCR3);
302 }
303
304 static void imx_port_ucrs_restore(struct uart_port *port,
305                                   struct imx_port_ucrs *ucr)
306 {
307         /* restore control registers */
308         writel(ucr->ucr1, port->membase + UCR1);
309         writel(ucr->ucr2, port->membase + UCR2);
310         writel(ucr->ucr3, port->membase + UCR3);
311 }
312 #endif
313
314 /*
315  * interrupts disabled on entry
316  */
317 static void imx_stop_tx(struct uart_port *port)
318 {
319         struct imx_port *sport = (struct imx_port *)port;
320         unsigned long temp;
321
322         /*
323          * We may be in SMP context, so if the DMA TX thread is running on
324          * another CPU, let it finish before disabling the transmitter.
325          */
326         if (sport->dma_is_enabled && sport->dma_is_txing)
327                 return;
328
329         temp = readl(port->membase + UCR1);
330         writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1);
331
332         /* in rs485 mode disable transmitter if shifter is empty */
333         if (port->rs485.flags & SER_RS485_ENABLED &&
334             readl(port->membase + USR2) & USR2_TXDC) {
335                 temp = readl(port->membase + UCR2);
336                 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
337                         temp &= ~UCR2_CTS;
338                 else
339                         temp |= UCR2_CTS;
340                 writel(temp, port->membase + UCR2);
341
342                 temp = readl(port->membase + UCR4);
343                 temp &= ~UCR4_TCEN;
344                 writel(temp, port->membase + UCR4);
345         }
346 }
347
348 /*
349  * interrupts disabled on entry
350  */
351 static void imx_stop_rx(struct uart_port *port)
352 {
353         struct imx_port *sport = (struct imx_port *)port;
354         unsigned long temp;
355
356         if (sport->dma_is_enabled && sport->dma_is_rxing) {
357                 if (sport->port.suspended) {
358                         dmaengine_terminate_all(sport->dma_chan_rx);
359                         sport->dma_is_rxing = 0;
360                 } else {
361                         return;
362                 }
363         }
364
365         temp = readl(sport->port.membase + UCR2);
366         writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
367
368         /* disable the `Receiver Ready Interrupt` */
369         temp = readl(sport->port.membase + UCR1);
370         writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
371 }
372
373 /*
374  * Set the modem control timer to fire immediately.
375  */
376 static void imx_enable_ms(struct uart_port *port)
377 {
378         struct imx_port *sport = (struct imx_port *)port;
379
380         mod_timer(&sport->timer, jiffies);
381 }
382
383 static void imx_dma_tx(struct imx_port *sport);
384 static inline void imx_transmit_buffer(struct imx_port *sport)
385 {
386         struct circ_buf *xmit = &sport->port.state->xmit;
387         unsigned long temp;
388
389         if (sport->port.x_char) {
390                 /* Send next char */
391                 writel(sport->port.x_char, sport->port.membase + URTX0);
392                 sport->port.icount.tx++;
393                 sport->port.x_char = 0;
394                 return;
395         }
396
397         if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
398                 imx_stop_tx(&sport->port);
399                 return;
400         }
401
402         if (sport->dma_is_enabled) {
403                 /*
404                  * We've just sent an X-char. Ensure the TX DMA is enabled
405                  * and the TX IRQ is disabled.
406                  */
407                 temp = readl(sport->port.membase + UCR1);
408                 temp &= ~UCR1_TXMPTYEN;
409                 if (sport->dma_is_txing) {
410                         temp |= UCR1_TDMAEN;
411                         writel(temp, sport->port.membase + UCR1);
412                 } else {
413                         writel(temp, sport->port.membase + UCR1);
414                         imx_dma_tx(sport);
415                 }
416         }
417
418         while (!uart_circ_empty(xmit) &&
419                !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
420                 /* send xmit->buf[xmit->tail]
421                  * out the port here */
422                 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
423                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
424                 sport->port.icount.tx++;
425         }
426
427         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
428                 uart_write_wakeup(&sport->port);
429
430         if (uart_circ_empty(xmit))
431                 imx_stop_tx(&sport->port);
432 }
433
434 static void dma_tx_callback(void *data)
435 {
436         struct imx_port *sport = data;
437         struct scatterlist *sgl = &sport->tx_sgl[0];
438         struct circ_buf *xmit = &sport->port.state->xmit;
439         unsigned long flags;
440         unsigned long temp;
441
442         spin_lock_irqsave(&sport->port.lock, flags);
443
444         dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
445
446         temp = readl(sport->port.membase + UCR1);
447         temp &= ~UCR1_TDMAEN;
448         writel(temp, sport->port.membase + UCR1);
449
450         /* update the stat */
451         xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
452         sport->port.icount.tx += sport->tx_bytes;
453
454         dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
455
456         sport->dma_is_txing = 0;
457
458         spin_unlock_irqrestore(&sport->port.lock, flags);
459
460         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
461                 uart_write_wakeup(&sport->port);
462
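        /* imx_shutdown() may be waiting for the DMA to finish; wake it up */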
463         if (waitqueue_active(&sport->dma_wait)) {
464                 wake_up(&sport->dma_wait);
465                 dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
466                 return;
467         }
468
469         spin_lock_irqsave(&sport->port.lock, flags);
470         if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
471                 imx_dma_tx(sport);
472         spin_unlock_irqrestore(&sport->port.lock, flags);
473 }
474
475 static void imx_dma_tx(struct imx_port *sport)
476 {
477         struct circ_buf *xmit = &sport->port.state->xmit;
478         struct scatterlist *sgl = sport->tx_sgl;
479         struct dma_async_tx_descriptor *desc;
480         struct dma_chan *chan = sport->dma_chan_tx;
481         struct device *dev = sport->port.dev;
482         unsigned long temp;
483         int ret;
484
485         if (sport->dma_is_txing)
486                 return;
487
488         sport->tx_bytes = uart_circ_chars_pending(xmit);
489
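        /*
         * Map the pending characters; if they wrap past the end of the
         * circular buffer, two scatterlist entries are needed.
         */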
490         if (xmit->tail < xmit->head) {
491                 sport->dma_tx_nents = 1;
492                 sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
493         } else {
494                 sport->dma_tx_nents = 2;
495                 sg_init_table(sgl, 2);
496                 sg_set_buf(sgl, xmit->buf + xmit->tail,
497                                 UART_XMIT_SIZE - xmit->tail);
498                 sg_set_buf(sgl + 1, xmit->buf, xmit->head);
499         }
500
501         ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
502         if (ret == 0) {
503                 dev_err(dev, "DMA mapping error for TX.\n");
504                 return;
505         }
506         desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
507                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
508         if (!desc) {
509                 dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
510                              DMA_TO_DEVICE);
511                 dev_err(dev, "We cannot prepare for the TX slave dma!\n");
512                 return;
513         }
514         desc->callback = dma_tx_callback;
515         desc->callback_param = sport;
516
517         dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
518                         uart_circ_chars_pending(xmit));
519
520         temp = readl(sport->port.membase + UCR1);
521         temp |= UCR1_TDMAEN;
522         writel(temp, sport->port.membase + UCR1);
523
524         /* fire it */
525         sport->dma_is_txing = 1;
526         dmaengine_submit(desc);
527         dma_async_issue_pending(chan);
528         return;
529 }
530
531 /*
532  * interrupts disabled on entry
533  */
534 static void imx_start_tx(struct uart_port *port)
535 {
536         struct imx_port *sport = (struct imx_port *)port;
537         unsigned long temp;
538
539         if (port->rs485.flags & SER_RS485_ENABLED) {
540                 /* enable transmitter and shifter empty irq */
541                 temp = readl(port->membase + UCR2);
542                 if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
543                         temp &= ~UCR2_CTS;
544                 else
545                         temp |= UCR2_CTS;
546                 writel(temp, port->membase + UCR2);
547
548                 temp = readl(port->membase + UCR4);
549                 temp |= UCR4_TCEN;
550                 writel(temp, port->membase + UCR4);
551         }
552
553         if (!sport->dma_is_enabled) {
554                 temp = readl(sport->port.membase + UCR1);
555                 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
556         }
557
558         if (sport->dma_is_enabled) {
559                 if (sport->port.x_char) {
560                         /* We have an X-char to send, so enable the TX IRQ and
561                          * disable TX DMA so the TX interrupt can send it */
562                         temp = readl(sport->port.membase + UCR1);
563                         temp &= ~UCR1_TDMAEN;
564                         temp |= UCR1_TXMPTYEN;
565                         writel(temp, sport->port.membase + UCR1);
566                         return;
567                 }
568
569                 if (!uart_circ_empty(&port->state->xmit) &&
570                     !uart_tx_stopped(port))
571                         imx_dma_tx(sport);
572                 return;
573         }
574 }
575
576 static irqreturn_t imx_rtsint(int irq, void *dev_id)
577 {
578         struct imx_port *sport = dev_id;
579         unsigned int val;
580         unsigned long flags;
581
582         spin_lock_irqsave(&sport->port.lock, flags);
583
584         writel(USR1_RTSD, sport->port.membase + USR1);
585         val = readl(sport->port.membase + USR1) & USR1_RTSS;
586         uart_handle_cts_change(&sport->port, !!val);
587         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
588
589         spin_unlock_irqrestore(&sport->port.lock, flags);
590         return IRQ_HANDLED;
591 }
592
593 static irqreturn_t imx_txint(int irq, void *dev_id)
594 {
595         struct imx_port *sport = dev_id;
596         unsigned long flags;
597
598         spin_lock_irqsave(&sport->port.lock, flags);
599         imx_transmit_buffer(sport);
600         spin_unlock_irqrestore(&sport->port.lock, flags);
601         return IRQ_HANDLED;
602 }
603
604 static irqreturn_t imx_rxint(int irq, void *dev_id)
605 {
606         struct imx_port *sport = dev_id;
607         unsigned int rx, flg, ignored = 0;
608         struct tty_port *port = &sport->port.state->port;
609         unsigned long flags, temp;
610
611         spin_lock_irqsave(&sport->port.lock, flags);
612
613         while (readl(sport->port.membase + USR2) & USR2_RDR) {
614                 flg = TTY_NORMAL;
615                 sport->port.icount.rx++;
616
617                 rx = readl(sport->port.membase + URXD0);
618
619                 temp = readl(sport->port.membase + USR2);
620                 if (temp & USR2_BRCD) {
621                         writel(USR2_BRCD, sport->port.membase + USR2);
622                         if (uart_handle_break(&sport->port))
623                                 continue;
624                 }
625
626                 if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
627                         continue;
628
629                 if (unlikely(rx & URXD_ERR)) {
630                         if (rx & URXD_BRK)
631                                 sport->port.icount.brk++;
632                         else if (rx & URXD_PRERR)
633                                 sport->port.icount.parity++;
634                         else if (rx & URXD_FRMERR)
635                                 sport->port.icount.frame++;
636                         if (rx & URXD_OVRRUN)
637                                 sport->port.icount.overrun++;
638
639                         if (rx & sport->port.ignore_status_mask) {
640                                 if (++ignored > 100)
641                                         goto out;
642                                 continue;
643                         }
644
645                         rx &= (sport->port.read_status_mask | 0xFF);
646
647                         if (rx & URXD_BRK)
648                                 flg = TTY_BREAK;
649                         else if (rx & URXD_PRERR)
650                                 flg = TTY_PARITY;
651                         else if (rx & URXD_FRMERR)
652                                 flg = TTY_FRAME;
653                         if (rx & URXD_OVRRUN)
654                                 flg = TTY_OVERRUN;
655
656 #ifdef SUPPORT_SYSRQ
657                         sport->port.sysrq = 0;
658 #endif
659                 }
660
661                 if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
662                         goto out;
663
664                 if (tty_insert_flip_char(port, rx, flg) == 0)
665                         sport->port.icount.buf_overrun++;
666         }
667
668 out:
669         spin_unlock_irqrestore(&sport->port.lock, flags);
670         tty_flip_buffer_push(port);
671         return IRQ_HANDLED;
672 }
673
674 static int start_rx_dma(struct imx_port *sport);
675 /*
676  * If the RXFIFO contains data, start a DMA
677  * operation to receive it.
678  */
679 static void imx_dma_rxint(struct imx_port *sport)
680 {
681         unsigned long temp;
682         unsigned long flags;
683
684         spin_lock_irqsave(&sport->port.lock, flags);
685
686         temp = readl(sport->port.membase + USR2);
687         if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
688                 sport->dma_is_rxing = 1;
689
690                 /* disable the receiver ready and aging timer interrupts */
691                 temp = readl(sport->port.membase + UCR1);
692                 temp &= ~(UCR1_RRDYEN);
693                 writel(temp, sport->port.membase + UCR1);
694
695                 temp = readl(sport->port.membase + UCR2);
696                 temp &= ~(UCR2_ATEN);
697                 writel(temp, sport->port.membase + UCR2);
698
699                 /* tell the DMA to receive the data. */
700                 start_rx_dma(sport);
701         }
702
703         spin_unlock_irqrestore(&sport->port.lock, flags);
704 }
705
706 static irqreturn_t imx_int(int irq, void *dev_id)
707 {
708         struct imx_port *sport = dev_id;
709         unsigned int sts;
710         unsigned int sts2;
711
712         sts = readl(sport->port.membase + USR1);
713         sts2 = readl(sport->port.membase + USR2);
714
715         if (sts & (USR1_RRDY | USR1_AGTIM)) {
716                 if (sport->dma_is_enabled)
717                         imx_dma_rxint(sport);
718                 else
719                         imx_rxint(irq, dev_id);
720         }
721
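        /*
         * TRDY with TXMPTYEN set is the normal transmit-ready interrupt;
         * TXDC with TCEN set is the transmitter-complete interrupt enabled
         * for rs485 in imx_start_tx(). Both are handled by imx_txint().
         */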
722         if ((sts & USR1_TRDY &&
723              readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
724             (sts2 & USR2_TXDC &&
725              readl(sport->port.membase + UCR4) & UCR4_TCEN))
726                 imx_txint(irq, dev_id);
727
728         if (sts & USR1_RTSD)
729                 imx_rtsint(irq, dev_id);
730
731         if (sts & USR1_AWAKE)
732                 writel(USR1_AWAKE, sport->port.membase + USR1);
733
734         if (sts2 & USR2_ORE) {
735                 sport->port.icount.overrun++;
736                 writel(USR2_ORE, sport->port.membase + USR2);
737         }
738
739         return IRQ_HANDLED;
740 }
741
742 /*
743  * Return TIOCSER_TEMT when transmitter is not busy.
744  */
745 static unsigned int imx_tx_empty(struct uart_port *port)
746 {
747         struct imx_port *sport = (struct imx_port *)port;
748         unsigned int ret;
749
750         ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ?  TIOCSER_TEMT : 0;
751
752         /* If the TX DMA is working, return 0. */
753         if (sport->dma_is_enabled && sport->dma_is_txing)
754                 ret = 0;
755
756         return ret;
757 }
758
759 /*
760  * We have a modem side uart, so the meanings of RTS and CTS are inverted.
761  */
762 static unsigned int imx_get_mctrl(struct uart_port *port)
763 {
764         struct imx_port *sport = (struct imx_port *)port;
765         unsigned int tmp = TIOCM_DSR;
766         unsigned usr1 = readl(sport->port.membase + USR1);
767
768         if (usr1 & USR1_RTSS)
769                 tmp |= TIOCM_CTS;
770
771         /* in DCE mode DCDIN is always 0 */
772         if (!(readl(sport->port.membase + USR2) & USR2_DCDIN))
773                 tmp |= TIOCM_CAR;
774
775         /* in DCE mode RIIN is always 0 */
776         if (readl(sport->port.membase + USR2) & USR2_RIIN)
777                 tmp |= TIOCM_RI;
778
779         return tmp;
780 }
781
782 static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
783 {
784         struct imx_port *sport = (struct imx_port *)port;
785         unsigned long temp;
786
787         if (!(port->rs485.flags & SER_RS485_ENABLED)) {
788                 temp = readl(sport->port.membase + UCR2);
789                 temp &= ~(UCR2_CTS | UCR2_CTSC);
790                 if (mctrl & TIOCM_RTS)
791                         temp |= UCR2_CTS | UCR2_CTSC;
792                 writel(temp, sport->port.membase + UCR2);
793         }
794
795         temp = readl(sport->port.membase + UCR3) & ~UCR3_DSR;
796         if (!(mctrl & TIOCM_DTR))
797                 temp |= UCR3_DSR;
798         writel(temp, sport->port.membase + UCR3);
799
800         temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
801         if (mctrl & TIOCM_LOOP)
802                 temp |= UTS_LOOP;
803         writel(temp, sport->port.membase + uts_reg(sport));
804 }
805
806 /*
807  * Interrupts always disabled.
808  */
809 static void imx_break_ctl(struct uart_port *port, int break_state)
810 {
811         struct imx_port *sport = (struct imx_port *)port;
812         unsigned long flags, temp;
813
814         spin_lock_irqsave(&sport->port.lock, flags);
815
816         temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
817
818         if (break_state != 0)
819                 temp |= UCR1_SNDBRK;
820
821         writel(temp, sport->port.membase + UCR1);
822
823         spin_unlock_irqrestore(&sport->port.lock, flags);
824 }
825
826 /*
827  * Handle any change of modem status signal since we were last called.
828  */
829 static void imx_mctrl_check(struct imx_port *sport)
830 {
831         unsigned int status, changed;
832
833         status = imx_get_mctrl(&sport->port);
834         changed = status ^ sport->old_status;
835
836         if (changed == 0)
837                 return;
838
839         sport->old_status = status;
840
841         if (changed & TIOCM_RI)
842                 sport->port.icount.rng++;
843         if (changed & TIOCM_DSR)
844                 sport->port.icount.dsr++;
845         if (changed & TIOCM_CAR)
846                 uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
847         if (changed & TIOCM_CTS)
848                 uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
849
850         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
851 }
852
853 /*
854  * This is our per-port timeout handler, for checking the
855  * modem status signals.
856  */
857 static void imx_timeout(unsigned long data)
858 {
859         struct imx_port *sport = (struct imx_port *)data;
860         unsigned long flags;
861
862         if (sport->port.state) {
863                 spin_lock_irqsave(&sport->port.lock, flags);
864                 imx_mctrl_check(sport);
865                 spin_unlock_irqrestore(&sport->port.lock, flags);
866
867                 mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
868         }
869 }
870
871 #define RX_BUF_SIZE     (PAGE_SIZE)
872 static void imx_rx_dma_done(struct imx_port *sport)
873 {
874         unsigned long temp;
875         unsigned long flags;
876
877         spin_lock_irqsave(&sport->port.lock, flags);
878
879         /* re-enable interrupts to get notified when new symbols are incoming */
880         temp = readl(sport->port.membase + UCR1);
881         temp |= UCR1_RRDYEN;
882         writel(temp, sport->port.membase + UCR1);
883
884         temp = readl(sport->port.membase + UCR2);
885         temp |= UCR2_ATEN;
886         writel(temp, sport->port.membase + UCR2);
887
888         sport->dma_is_rxing = 0;
889
890         /* Is the shutdown waiting for us? */
891         if (waitqueue_active(&sport->dma_wait))
892                 wake_up(&sport->dma_wait);
893
894         spin_unlock_irqrestore(&sport->port.lock, flags);
895 }
896
897 /*
898  * There are two kinds of RX DMA interrupts (such as in the MX6Q):
899  *   [1] the RX DMA buffer is full.
900  *   [2] the aging timer expires.
901  *
902  * Condition [2] is triggered when a character has been sitting in the FIFO
903  * for at least 8 byte durations.
904  */
905 static void dma_rx_callback(void *data)
906 {
907         struct imx_port *sport = data;
908         struct dma_chan *chan = sport->dma_chan_rx;
909         struct scatterlist *sgl = &sport->rx_sgl;
910         struct tty_port *port = &sport->port.state->port;
911         struct dma_tx_state state;
912         enum dma_status status;
913         unsigned int count;
914
915         /* unmap it first */
916         dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
917
918         status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
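        /* the residue is the unfilled part of the buffer; the rest is new data */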
919         count = RX_BUF_SIZE - state.residue;
920
921         dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
922
923         if (count) {
924                 if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
925                         int bytes = tty_insert_flip_string(port, sport->rx_buf,
926                                         count);
927
928                         if (bytes != count)
929                                 sport->port.icount.buf_overrun++;
930                 }
931                 tty_flip_buffer_push(port);
932                 sport->port.icount.rx += count;
933         }
934
935         /*
936          * Restart RX DMA directly if more data is available in order to skip
937          * the roundtrip through the IRQ handler. If there is some data already
938          * in the FIFO, DMA needs to be restarted soon anyways.
939          *
940          * Otherwise stop the DMA and reactivate FIFO IRQs to restart DMA once
941          * data starts to arrive again.
942          */
943         if (readl(sport->port.membase + USR2) & USR2_RDR)
944                 start_rx_dma(sport);
945         else
946                 imx_rx_dma_done(sport);
947 }
948
949 static int start_rx_dma(struct imx_port *sport)
950 {
951         struct scatterlist *sgl = &sport->rx_sgl;
952         struct dma_chan *chan = sport->dma_chan_rx;
953         struct device *dev = sport->port.dev;
954         struct dma_async_tx_descriptor *desc;
955         int ret;
956
957         sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
958         ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
959         if (ret == 0) {
960                 dev_err(dev, "DMA mapping error for RX.\n");
961                 return -EINVAL;
962         }
963         desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
964                                         DMA_PREP_INTERRUPT);
965         if (!desc) {
966                 dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
967                 dev_err(dev, "We cannot prepare for the RX slave dma!\n");
968                 return -EINVAL;
969         }
970         desc->callback = dma_rx_callback;
971         desc->callback_param = sport;
972
973         dev_dbg(dev, "RX: prepare for the DMA.\n");
974         dmaengine_submit(desc);
975         dma_async_issue_pending(chan);
976         return 0;
977 }
978
979 #define TXTL_DEFAULT 2 /* reset default */
980 #define RXTL_DEFAULT 1 /* reset default */
981 #define TXTL_DMA 8 /* DMA burst setting */
982 #define RXTL_DMA 9 /* DMA burst setting */
983
984 static void imx_setup_ufcr(struct imx_port *sport,
985                           unsigned char txwl, unsigned char rxwl)
986 {
987         unsigned int val;
988
989         /* set receiver / transmitter trigger level */
990         val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
991         val |= txwl << UFCR_TXTL_SHF | rxwl;
992         writel(val, sport->port.membase + UFCR);
993 }
994
995 static void imx_uart_dma_exit(struct imx_port *sport)
996 {
997         if (sport->dma_chan_rx) {
998                 dma_release_channel(sport->dma_chan_rx);
999                 sport->dma_chan_rx = NULL;
1000
1001                 kfree(sport->rx_buf);
1002                 sport->rx_buf = NULL;
1003         }
1004
1005         if (sport->dma_chan_tx) {
1006                 dma_release_channel(sport->dma_chan_tx);
1007                 sport->dma_chan_tx = NULL;
1008         }
1009
1010         sport->dma_is_inited = 0;
1011 }
1012
1013 static int imx_uart_dma_init(struct imx_port *sport)
1014 {
1015         struct dma_slave_config slave_config = {};
1016         struct device *dev = sport->port.dev;
1017         int ret;
1018
1019         /* Prepare for RX : */
1020         sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
1021         if (!sport->dma_chan_rx) {
1022                 dev_dbg(dev, "cannot get the DMA channel.\n");
1023                 ret = -EINVAL;
1024                 goto err;
1025         }
1026
1027         slave_config.direction = DMA_DEV_TO_MEM;
1028         slave_config.src_addr = sport->port.mapbase + URXD0;
1029         slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1030         /* one byte less than the watermark level to enable the aging timer */
1031         slave_config.src_maxburst = RXTL_DMA - 1;
1032         ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
1033         if (ret) {
1034                 dev_err(dev, "error in RX dma configuration.\n");
1035                 goto err;
1036         }
1037
1038         sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1039         if (!sport->rx_buf) {
1040                 ret = -ENOMEM;
1041                 goto err;
1042         }
1043
1044         /* Prepare for TX : */
1045         sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
1046         if (!sport->dma_chan_tx) {
1047                 dev_err(dev, "cannot get the TX DMA channel!\n");
1048                 ret = -EINVAL;
1049                 goto err;
1050         }
1051
1052         slave_config.direction = DMA_MEM_TO_DEV;
1053         slave_config.dst_addr = sport->port.mapbase + URTX0;
1054         slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1055         slave_config.dst_maxburst = TXTL_DMA;
1056         ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
1057         if (ret) {
1058                 dev_err(dev, "error in TX dma configuration.");
1059                 goto err;
1060         }
1061
1062         sport->dma_is_inited = 1;
1063
1064         return 0;
1065 err:
1066         imx_uart_dma_exit(sport);
1067         return ret;
1068 }
1069
1070 static void imx_enable_dma(struct imx_port *sport)
1071 {
1072         unsigned long temp;
1073
1074         init_waitqueue_head(&sport->dma_wait);
1075
1076         /* set UCR1 */
1077         temp = readl(sport->port.membase + UCR1);
1078         temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
1079         writel(temp, sport->port.membase + UCR1);
1080
1081         temp = readl(sport->port.membase + UCR2);
1082         temp |= UCR2_ATEN;
1083         writel(temp, sport->port.membase + UCR2);
1084
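        /* raise the FIFO watermarks to match the DMA burst sizes */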
1085         imx_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
1086
1087         sport->dma_is_enabled = 1;
1088 }
1089
1090 static void imx_disable_dma(struct imx_port *sport)
1091 {
1092         unsigned long temp;
1093
1094         /* clear UCR1 */
1095         temp = readl(sport->port.membase + UCR1);
1096         temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
1097         writel(temp, sport->port.membase + UCR1);
1098
1099         /* clear UCR2 */
1100         temp = readl(sport->port.membase + UCR2);
1101         temp &= ~(UCR2_CTSC | UCR2_CTS | UCR2_ATEN);
1102         writel(temp, sport->port.membase + UCR2);
1103
1104         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1105
1106         sport->dma_is_enabled = 0;
1107 }
1108
1109 /* half the RX buffer size */
1110 #define CTSTL 16
1111
1112 static int imx_startup(struct uart_port *port)
1113 {
1114         struct imx_port *sport = (struct imx_port *)port;
1115         int retval, i;
1116         unsigned long flags, temp;
1117
1118         retval = clk_prepare_enable(sport->clk_per);
1119         if (retval)
1120                 return retval;
1121         retval = clk_prepare_enable(sport->clk_ipg);
1122         if (retval) {
1123                 clk_disable_unprepare(sport->clk_per);
1124                 return retval;
1125         }
1126
1127         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1128
1129         /* disable the DREN bit (Data Ready interrupt enable) before
1130          * requesting IRQs
1131          */
1132         temp = readl(sport->port.membase + UCR4);
1133
1134         /* set the trigger level for CTS */
1135         temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
1136         temp |= CTSTL << UCR4_CTSTL_SHF;
1137
1138         writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1139
1140         /* Can we enable the DMA support? */
1141         if (is_imx6q_uart(sport) && !uart_console(port) &&
1142             !sport->dma_is_inited)
1143                 imx_uart_dma_init(sport);
1144
1145         spin_lock_irqsave(&sport->port.lock, flags);
1146         /* Reset FIFOs and state machines */
1147         i = 100;
1148
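        /* clearing SRST starts a software reset; the bit reads back as 1 when done */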
1149         temp = readl(sport->port.membase + UCR2);
1150         temp &= ~UCR2_SRST;
1151         writel(temp, sport->port.membase + UCR2);
1152
1153         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1154                 udelay(1);
1155
1156         /*
1157          * Finally, clear and enable interrupts
1158          */
1159         writel(USR1_RTSD, sport->port.membase + USR1);
1160         writel(USR2_ORE, sport->port.membase + USR2);
1161
1162         if (sport->dma_is_inited && !sport->dma_is_enabled)
1163                 imx_enable_dma(sport);
1164
1165         temp = readl(sport->port.membase + UCR1);
1166         temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1167
1168         writel(temp, sport->port.membase + UCR1);
1169
1170         temp = readl(sport->port.membase + UCR4);
1171         temp |= UCR4_OREN;
1172         writel(temp, sport->port.membase + UCR4);
1173
1174         temp = readl(sport->port.membase + UCR2);
1175         temp |= (UCR2_RXEN | UCR2_TXEN);
1176         if (!sport->have_rtscts)
1177                 temp |= UCR2_IRTS;
1178         writel(temp, sport->port.membase + UCR2);
1179
1180         if (!is_imx1_uart(sport)) {
1181                 temp = readl(sport->port.membase + UCR3);
1182                 temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
1183                 writel(temp, sport->port.membase + UCR3);
1184         }
1185
1186         /*
1187          * Enable modem status interrupts
1188          */
1189         imx_enable_ms(&sport->port);
1190         spin_unlock_irqrestore(&sport->port.lock, flags);
1191
1192         return 0;
1193 }
1194
1195 static void imx_shutdown(struct uart_port *port)
1196 {
1197         struct imx_port *sport = (struct imx_port *)port;
1198         unsigned long temp;
1199         unsigned long flags;
1200
1201         if (sport->dma_is_enabled) {
1202                 int ret;
1203
1204                 /* We have to wait for the DMA to finish. */
1205                 ret = wait_event_interruptible(sport->dma_wait,
1206                         !sport->dma_is_rxing && !sport->dma_is_txing);
1207                 if (ret != 0) {
1208                         sport->dma_is_rxing = 0;
1209                         sport->dma_is_txing = 0;
1210                         dmaengine_terminate_all(sport->dma_chan_tx);
1211                         dmaengine_terminate_all(sport->dma_chan_rx);
1212                 }
1213                 spin_lock_irqsave(&sport->port.lock, flags);
1214                 imx_stop_tx(port);
1215                 imx_stop_rx(port);
1216                 imx_disable_dma(sport);
1217                 spin_unlock_irqrestore(&sport->port.lock, flags);
1218                 imx_uart_dma_exit(sport);
1219         }
1220
1221         spin_lock_irqsave(&sport->port.lock, flags);
1222         temp = readl(sport->port.membase + UCR2);
1223         temp &= ~(UCR2_TXEN);
1224         writel(temp, sport->port.membase + UCR2);
1225         spin_unlock_irqrestore(&sport->port.lock, flags);
1226
1227         /*
1228          * Stop our timer.
1229          */
1230         del_timer_sync(&sport->timer);
1231
1232         /*
1233          * Disable all interrupts, port and break condition.
1234          */
1235
1236         spin_lock_irqsave(&sport->port.lock, flags);
1237         temp = readl(sport->port.membase + UCR1);
1238         temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
1239
1240         writel(temp, sport->port.membase + UCR1);
1241         spin_unlock_irqrestore(&sport->port.lock, flags);
1242
1243         clk_disable_unprepare(sport->clk_per);
1244         clk_disable_unprepare(sport->clk_ipg);
1245 }
1246
1247 static void imx_flush_buffer(struct uart_port *port)
1248 {
1249         struct imx_port *sport = (struct imx_port *)port;
1250         struct scatterlist *sgl = &sport->tx_sgl[0];
1251         unsigned long temp;
1252         int i = 100, ubir, ubmr, uts;
1253
1254         if (!sport->dma_chan_tx)
1255                 return;
1256
1257         sport->tx_bytes = 0;
1258         dmaengine_terminate_all(sport->dma_chan_tx);
1259         if (sport->dma_is_txing) {
1260                 dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
1261                              DMA_TO_DEVICE);
1262                 temp = readl(sport->port.membase + UCR1);
1263                 temp &= ~UCR1_TDMAEN;
1264                 writel(temp, sport->port.membase + UCR1);
1265                 sport->dma_is_txing = false;
1266         }
1267
1268         /*
1269          * According to the Reference Manual description of the UART SRST bit:
1270          * "Reset the transmit and receive state machines,
1271          * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
1272          * and UTS[6-3]". As we don't need to restore the old values from
1273          * USR1, USR2, URXD and UTXD, we only save/restore UBIR, UBMR and UTS
1274          */
1275         ubir = readl(sport->port.membase + UBIR);
1276         ubmr = readl(sport->port.membase + UBMR);
1277         uts = readl(sport->port.membase + IMX21_UTS);
1278
1279         temp = readl(sport->port.membase + UCR2);
1280         temp &= ~UCR2_SRST;
1281         writel(temp, sport->port.membase + UCR2);
1282
1283         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1284                 udelay(1);
1285
1286         /* Restore the registers */
1287         writel(ubir, sport->port.membase + UBIR);
1288         writel(ubmr, sport->port.membase + UBMR);
1289         writel(uts, sport->port.membase + IMX21_UTS);
1290 }
1291
1292 static void
1293 imx_set_termios(struct uart_port *port, struct ktermios *termios,
1294                    struct ktermios *old)
1295 {
1296         struct imx_port *sport = (struct imx_port *)port;
1297         unsigned long flags;
1298         unsigned int ucr2, old_ucr1, old_ucr2, baud, quot;
1299         unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
1300         unsigned int div, ufcr;
1301         unsigned long num, denom;
1302         uint64_t tdiv64;
1303
1304         /*
1305          * We only support CS7 and CS8.
1306          */
1307         while ((termios->c_cflag & CSIZE) != CS7 &&
1308                (termios->c_cflag & CSIZE) != CS8) {
1309                 termios->c_cflag &= ~CSIZE;
1310                 termios->c_cflag |= old_csize;
1311                 old_csize = CS8;
1312         }
1313
1314         if ((termios->c_cflag & CSIZE) == CS8)
1315                 ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
1316         else
1317                 ucr2 = UCR2_SRST | UCR2_IRTS;
1318
1319         if (termios->c_cflag & CRTSCTS) {
1320                 if (sport->have_rtscts) {
1321                         ucr2 &= ~UCR2_IRTS;
1322
1323                         if (port->rs485.flags & SER_RS485_ENABLED) {
1324                                 /*
1325                                  * RTS is mandatory for rs485 operation, so keep
1326                                  * it under manual control and keep transmitter
1327                                  * disabled.
1328                                  */
1329                                 if (!(port->rs485.flags &
1330                                       SER_RS485_RTS_AFTER_SEND))
1331                                         ucr2 |= UCR2_CTS;
1332                         } else {
1333                                 ucr2 |= UCR2_CTSC;
1334                         }
1335                 } else {
1336                         termios->c_cflag &= ~CRTSCTS;
1337                 }
1338         } else if (port->rs485.flags & SER_RS485_ENABLED)
1339                 /* disable transmitter */
1340                 if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
1341                         ucr2 |= UCR2_CTS;
1342
1343         if (termios->c_cflag & CSTOPB)
1344                 ucr2 |= UCR2_STPB;
1345         if (termios->c_cflag & PARENB) {
1346                 ucr2 |= UCR2_PREN;
1347                 if (termios->c_cflag & PARODD)
1348                         ucr2 |= UCR2_PROE;
1349         }
1350
1351         del_timer_sync(&sport->timer);
1352
1353         /*
1354          * Ask the core to calculate the divisor for us.
1355          */
1356         baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1357         quot = uart_get_divisor(port, baud);
1358
1359         spin_lock_irqsave(&sport->port.lock, flags);
1360
1361         sport->port.read_status_mask = 0;
1362         if (termios->c_iflag & INPCK)
1363                 sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
1364         if (termios->c_iflag & (BRKINT | PARMRK))
1365                 sport->port.read_status_mask |= URXD_BRK;
1366
1367         /*
1368          * Characters to ignore
1369          */
1370         sport->port.ignore_status_mask = 0;
1371         if (termios->c_iflag & IGNPAR)
1372                 sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
1373         if (termios->c_iflag & IGNBRK) {
1374                 sport->port.ignore_status_mask |= URXD_BRK;
1375                 /*
1376                  * If we're ignoring parity and break indicators,
1377                  * ignore overruns too (for real raw support).
1378                  */
1379                 if (termios->c_iflag & IGNPAR)
1380                         sport->port.ignore_status_mask |= URXD_OVRRUN;
1381         }
1382
1383         if ((termios->c_cflag & CREAD) == 0)
1384                 sport->port.ignore_status_mask |= URXD_DUMMY_READ;
1385
1386         /*
1387          * Update the per-port timeout.
1388          */
1389         uart_update_timeout(port, termios->c_cflag, baud);
1390
1391         /*
1392          * disable interrupts and drain transmitter
1393          */
1394         old_ucr1 = readl(sport->port.membase + UCR1);
1395         writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
1396                         sport->port.membase + UCR1);
1397
1398         while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
1399                 barrier();
1400
1401         /* then, disable everything */
1402         old_ucr2 = readl(sport->port.membase + UCR2);
1403         writel(old_ucr2 & ~(UCR2_TXEN | UCR2_RXEN),
1404                         sport->port.membase + UCR2);
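        /* keep only the enable bits so they can be ORed back in further down */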
1405         old_ucr2 &= (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN);
1406
1407         /* custom-baudrate handling */
1408         div = sport->port.uartclk / (baud * 16);
1409         if (baud == 38400 && quot != div)
1410                 baud = sport->port.uartclk / (quot * 16);
1411
1412         div = sport->port.uartclk / (baud * 16);
1413         if (div > 7)
1414                 div = 7;
1415         if (!div)
1416                 div = 1;
1417
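        /*
         * The BRM scales uartclk / div down to baud * 16 using the ratio
         * num / denom, i.e. baud = uartclk * num / (denom * 16 * div);
         * UBIR and UBMR are programmed with num - 1 and denom - 1 below.
         */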
1418         rational_best_approximation(16 * div * baud, sport->port.uartclk,
1419                 1 << 16, 1 << 16, &num, &denom);
1420
1421         tdiv64 = sport->port.uartclk;
1422         tdiv64 *= num;
1423         do_div(tdiv64, denom * 16 * div);
1424         tty_termios_encode_baud_rate(termios,
1425                                 (speed_t)tdiv64, (speed_t)tdiv64);
1426
1427         num -= 1;
1428         denom -= 1;
1429
1430         ufcr = readl(sport->port.membase + UFCR);
1431         ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
1432         if (sport->dte_mode)
1433                 ufcr |= UFCR_DCEDTE;
1434         writel(ufcr, sport->port.membase + UFCR);
1435
1436         writel(num, sport->port.membase + UBIR);
1437         writel(denom, sport->port.membase + UBMR);
1438
1439         if (!is_imx1_uart(sport))
1440                 writel(sport->port.uartclk / div / 1000,
1441                                 sport->port.membase + IMX21_ONEMS);
1442
1443         writel(old_ucr1, sport->port.membase + UCR1);
1444
1445         /* set the parity, stop bits and data size */
1446         writel(ucr2 | old_ucr2, sport->port.membase + UCR2);
1447
1448         if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1449                 imx_enable_ms(&sport->port);
1450
1451         spin_unlock_irqrestore(&sport->port.lock, flags);
1452 }
1453
1454 static const char *imx_type(struct uart_port *port)
1455 {
1456         struct imx_port *sport = (struct imx_port *)port;
1457
1458         return sport->port.type == PORT_IMX ? "IMX" : NULL;
1459 }
1460
1461 /*
1462  * Configure/autoconfigure the port.
1463  */
1464 static void imx_config_port(struct uart_port *port, int flags)
1465 {
1466         struct imx_port *sport = (struct imx_port *)port;
1467
1468         if (flags & UART_CONFIG_TYPE)
1469                 sport->port.type = PORT_IMX;
1470 }
1471
1472 /*
1473  * Verify the new serial_struct (for TIOCSSERIAL).
1474  * The only changes we allow are to the flags and type, and
1475  * even then only between PORT_IMX and PORT_UNKNOWN.
1476  */
1477 static int
1478 imx_verify_port(struct uart_port *port, struct serial_struct *ser)
1479 {
1480         struct imx_port *sport = (struct imx_port *)port;
1481         int ret = 0;
1482
1483         if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1484                 ret = -EINVAL;
1485         if (sport->port.irq != ser->irq)
1486                 ret = -EINVAL;
1487         if (ser->io_type != UPIO_MEM)
1488                 ret = -EINVAL;
1489         if (sport->port.uartclk / 16 != ser->baud_base)
1490                 ret = -EINVAL;
1491         if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1492                 ret = -EINVAL;
1493         if (sport->port.iobase != ser->port)
1494                 ret = -EINVAL;
1495         if (ser->hub6 != 0)
1496                 ret = -EINVAL;
1497         return ret;
1498 }
1499
1500 #if defined(CONFIG_CONSOLE_POLL)
1501
1502 static int imx_poll_init(struct uart_port *port)
1503 {
1504         struct imx_port *sport = (struct imx_port *)port;
1505         unsigned long flags;
1506         unsigned long temp;
1507         int retval;
1508
1509         retval = clk_prepare_enable(sport->clk_ipg);
1510         if (retval)
1511                 return retval;
1512         retval = clk_prepare_enable(sport->clk_per);
1513         if (retval) {
1514                 clk_disable_unprepare(sport->clk_ipg);
                     return retval;
             }
1515
1516         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1517
1518         spin_lock_irqsave(&sport->port.lock, flags);
1519
1520         temp = readl(sport->port.membase + UCR1);
1521         if (is_imx1_uart(sport))
1522                 temp |= IMX1_UCR1_UARTCLKEN;
1523         temp |= UCR1_UARTEN | UCR1_RRDYEN;
1524         temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
1525         writel(temp, sport->port.membase + UCR1);
1526
1527         temp = readl(sport->port.membase + UCR2);
1528         temp |= UCR2_RXEN;
1529         writel(temp, sport->port.membase + UCR2);
1530
1531         spin_unlock_irqrestore(&sport->port.lock, flags);
1532
1533         return 0;
1534 }
1535
1536 static int imx_poll_get_char(struct uart_port *port)
1537 {
1538         if (!(readl_relaxed(port->membase + USR2) & USR2_RDR))
1539                 return NO_POLL_CHAR;
1540
1541         return readl_relaxed(port->membase + URXD0) & URXD_RX_DATA;
1542 }
1543
1544 static void imx_poll_put_char(struct uart_port *port, unsigned char c)
1545 {
1546         unsigned int status;
1547
1548         /* drain */
1549         do {
1550                 status = readl_relaxed(port->membase + USR1);
1551         } while (~status & USR1_TRDY);
1552
1553         /* write */
1554         writel_relaxed(c, port->membase + URTX0);
1555
1556         /* flush */
1557         do {
1558                 status = readl_relaxed(port->membase + USR2);
1559         } while (~status & USR2_TXDC);
1560 }
1561 #endif
1562
1563 static int imx_rs485_config(struct uart_port *port,
1564                             struct serial_rs485 *rs485conf)
1565 {
1566         struct imx_port *sport = (struct imx_port *)port;
1567
1568         /* RTS delays before/after send are not implemented */
1569         rs485conf->delay_rts_before_send = 0;
1570         rs485conf->delay_rts_after_send = 0;
1571         rs485conf->flags |= SER_RS485_RX_DURING_TX;
1572
1573         /* RTS is required to control the transmitter */
1574         if (!sport->have_rtscts)
1575                 rs485conf->flags &= ~SER_RS485_ENABLED;
1576
1577         if (rs485conf->flags & SER_RS485_ENABLED) {
1578                 unsigned long temp;
1579
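                     /*
                      * Clearing UCR2_CTSC takes the CTS/RTS output out of
                      * automatic (receiver controlled) mode so that the
                      * UCR2_CTS bit below directly drives the pin level.
                      */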
1580                 /* disable transmitter */
1581                 temp = readl(sport->port.membase + UCR2);
1582                 temp &= ~UCR2_CTSC;
1583                 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
1584                         temp &= ~UCR2_CTS;
1585                 else
1586                         temp |= UCR2_CTS;
1587                 writel(temp, sport->port.membase + UCR2);
1588         }
1589
1590         port->rs485 = *rs485conf;
1591
1592         return 0;
1593 }
1594
1595 static struct uart_ops imx_pops = {
1596         .tx_empty       = imx_tx_empty,
1597         .set_mctrl      = imx_set_mctrl,
1598         .get_mctrl      = imx_get_mctrl,
1599         .stop_tx        = imx_stop_tx,
1600         .start_tx       = imx_start_tx,
1601         .stop_rx        = imx_stop_rx,
1602         .enable_ms      = imx_enable_ms,
1603         .break_ctl      = imx_break_ctl,
1604         .startup        = imx_startup,
1605         .shutdown       = imx_shutdown,
1606         .flush_buffer   = imx_flush_buffer,
1607         .set_termios    = imx_set_termios,
1608         .type           = imx_type,
1609         .config_port    = imx_config_port,
1610         .verify_port    = imx_verify_port,
1611 #if defined(CONFIG_CONSOLE_POLL)
1612         .poll_init      = imx_poll_init,
1613         .poll_get_char  = imx_poll_get_char,
1614         .poll_put_char  = imx_poll_put_char,
1615 #endif
1616 };
1617
1618 static struct imx_port *imx_ports[UART_NR];
1619
1620 #ifdef CONFIG_SERIAL_IMX_CONSOLE
1621 static void imx_console_putchar(struct uart_port *port, int ch)
1622 {
1623         struct imx_port *sport = (struct imx_port *)port;
1624
1625         while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
1626                 barrier();
1627
1628         writel(ch, sport->port.membase + URTX0);
1629 }
1630
1631 /*
1632  * Interrupts are disabled on entry
1633  */
1634 static void
1635 imx_console_write(struct console *co, const char *s, unsigned int count)
1636 {
1637         struct imx_port *sport = imx_ports[co->index];
1638         struct imx_port_ucrs old_ucr;
1639         unsigned int ucr1;
1640         unsigned long flags = 0;
1641         int locked = 1;
1642         int retval;
1643
1644         retval = clk_enable(sport->clk_per);
1645         if (retval)
1646                 return;
1647         retval = clk_enable(sport->clk_ipg);
1648         if (retval) {
1649                 clk_disable(sport->clk_per);
1650                 return;
1651         }
1652
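             /*
              * If a sysrq is being handled, the port lock is already held by
              * the interrupt code, so do not take it again; during an oops
              * only try to take it so a CPU that died while holding the lock
              * cannot deadlock the console.
              */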
1653         if (sport->port.sysrq)
1654                 locked = 0;
1655         else if (oops_in_progress)
1656                 locked = spin_trylock_irqsave(&sport->port.lock, flags);
1657         else
1658                 spin_lock_irqsave(&sport->port.lock, flags);
1659
1660         /*
1661          *      First, save UCR1/2/3 and then disable interrupts
1662          */
1663         imx_port_ucrs_save(&sport->port, &old_ucr);
1664         ucr1 = old_ucr.ucr1;
1665
1666         if (is_imx1_uart(sport))
1667                 ucr1 |= IMX1_UCR1_UARTCLKEN;
1668         ucr1 |= UCR1_UARTEN;
1669         ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
1670
1671         writel(ucr1, sport->port.membase + UCR1);
1672
1673         writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
1674
1675         uart_console_write(&sport->port, s, count, imx_console_putchar);
1676
1677         /*
1678          *      Finally, wait for transmitter to become empty
1679          *      and restore UCR1/2/3
1680          */
1681         while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
                     barrier();
1682
1683         imx_port_ucrs_restore(&sport->port, &old_ucr);
1684
1685         if (locked)
1686                 spin_unlock_irqrestore(&sport->port.lock, flags);
1687
1688         clk_disable(sport->clk_ipg);
1689         clk_disable(sport->clk_per);
1690 }
1691
1692 /*
1693  * If the port was already initialised (eg, by a boot loader),
1694  * try to determine the current setup.
1695  */
1696 static void __init
1697 imx_console_get_options(struct imx_port *sport, int *baud,
1698                            int *parity, int *bits)
1699 {
1700
1701         if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
1702                 /* ok, the port was enabled */
1703                 unsigned int ucr2, ubir, ubmr, uartclk;
1704                 unsigned int baud_raw;
1705                 unsigned int ufcr_rfdiv;
1706
1707                 ucr2 = readl(sport->port.membase + UCR2);
1708
1709                 *parity = 'n';
1710                 if (ucr2 & UCR2_PREN) {
1711                         if (ucr2 & UCR2_PROE)
1712                                 *parity = 'o';
1713                         else
1714                                 *parity = 'e';
1715                 }
1716
1717                 if (ucr2 & UCR2_WS)
1718                         *bits = 8;
1719                 else
1720                         *bits = 7;
1721
1722                 ubir = readl(sport->port.membase + UBIR) & 0xffff;
1723                 ubmr = readl(sport->port.membase + UBMR) & 0xffff;
1724
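                     /*
                      * UFCR RFDIV encodes divide-by-6 down to divide-by-1 as
                      * 0..5 and divide-by-7 as 6, so undo that encoding to
                      * recover the real reference divider.
                      */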
1725                 ufcr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
1726                 if (ufcr_rfdiv == 6)
1727                         ufcr_rfdiv = 7;
1728                 else
1729                         ufcr_rfdiv = 6 - ufcr_rfdiv;
1730
1731                 uartclk = clk_get_rate(sport->clk_per);
1732                 uartclk /= ufcr_rfdiv;
1733
1734                 {       /*
1735                          * The next code provides exact computation of
1736                          *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
1737                          * without needing float support or long long division,
1738                          * which would otherwise be required to avoid 32-bit overflow
1739                          */
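                             /*
                              * Illustrative example with assumed values: for
                              * uartclk = 4000000, ubir = 15, ubmr = 433 this
                              * gives mul = 16, div = 6944, rem = 256, so
                              * baud_raw = 9216 + 1 = 9217 and the reported
                              * *baud is rounded to 9200.
                              */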
1740                         unsigned int mul = ubir + 1;
1741                         unsigned int div = 16 * (ubmr + 1);
1742                         unsigned int rem = uartclk % div;
1743
1744                         baud_raw = (uartclk / div) * mul;
1745                         baud_raw += (rem * mul + div / 2) / div;
1746                         *baud = (baud_raw + 50) / 100 * 100;
1747                 }
1748
1749                 if (*baud != baud_raw)
1750                         pr_info("Console IMX rounded baud rate from %d to %d\n",
1751                                 baud_raw, *baud);
1752         }
1753 }
1754
1755 static int __init
1756 imx_console_setup(struct console *co, char *options)
1757 {
1758         struct imx_port *sport;
1759         int baud = 9600;
1760         int bits = 8;
1761         int parity = 'n';
1762         int flow = 'n';
1763         int retval;
1764
1765         /*
1766          * Check whether an invalid uart number has been specified, and
1767          * if so, fall back to the first port that does have
1768          * console support.
1769          */
1770         if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
1771                 co->index = 0;
1772         sport = imx_ports[co->index];
1773         if (sport == NULL)
1774                 return -ENODEV;
1775
1776         /* For setting the registers, we only need to enable the ipg clock. */
1777         retval = clk_prepare_enable(sport->clk_ipg);
1778         if (retval)
1779                 goto error_console;
1780
1781         if (options)
1782                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1783         else
1784                 imx_console_get_options(sport, &baud, &parity, &bits);
1785
1786         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1787
1788         retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
1789
1790         clk_disable(sport->clk_ipg);
1791         if (retval) {
1792                 clk_unprepare(sport->clk_ipg);
1793                 goto error_console;
1794         }
1795
1796         retval = clk_prepare(sport->clk_per);
1797         if (retval)
1798                 clk_disable_unprepare(sport->clk_ipg);
1799
1800 error_console:
1801         return retval;
1802 }
1803
1804 static struct uart_driver imx_reg;
1805 static struct console imx_console = {
1806         .name           = DEV_NAME,
1807         .write          = imx_console_write,
1808         .device         = uart_console_device,
1809         .setup          = imx_console_setup,
1810         .flags          = CON_PRINTBUFFER,
1811         .index          = -1,
1812         .data           = &imx_reg,
1813 };
1814
1815 #define IMX_CONSOLE     &imx_console
1816
1817 #ifdef CONFIG_OF
1818 static void imx_console_early_putchar(struct uart_port *port, int ch)
1819 {
1820         while (readl_relaxed(port->membase + IMX21_UTS) & UTS_TXFULL)
1821                 cpu_relax();
1822
1823         writel_relaxed(ch, port->membase + URTX0);
1824 }
1825
1826 static void imx_console_early_write(struct console *con, const char *s,
1827                                     unsigned count)
1828 {
1829         struct earlycon_device *dev = con->data;
1830
1831         uart_console_write(&dev->port, s, count, imx_console_early_putchar);
1832 }
1833
1834 static int __init
1835 imx_console_early_setup(struct earlycon_device *dev, const char *opt)
1836 {
1837         if (!dev->port.membase)
1838                 return -ENODEV;
1839
1840         dev->con->write = imx_console_early_write;
1841
1842         return 0;
1843 }
1844 OF_EARLYCON_DECLARE(ec_imx6q, "fsl,imx6q-uart", imx_console_early_setup);
1845 OF_EARLYCON_DECLARE(ec_imx21, "fsl,imx21-uart", imx_console_early_setup);
1846 #endif
1847
1848 #else
1849 #define IMX_CONSOLE     NULL
1850 #endif
1851
1852 static struct uart_driver imx_reg = {
1853         .owner          = THIS_MODULE,
1854         .driver_name    = DRIVER_NAME,
1855         .dev_name       = DEV_NAME,
1856         .major          = SERIAL_IMX_MAJOR,
1857         .minor          = MINOR_START,
1858         .nr             = ARRAY_SIZE(imx_ports),
1859         .cons           = IMX_CONSOLE,
1860 };
1861
1862 #ifdef CONFIG_OF
1863 /*
1864  * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
1865  * could successfully get all information from dt, or a negative errno.
1866  */
1867 static int serial_imx_probe_dt(struct imx_port *sport,
1868                 struct platform_device *pdev)
1869 {
1870         struct device_node *np = pdev->dev.of_node;
1871         int ret;
1872
1873         sport->devdata = of_device_get_match_data(&pdev->dev);
1874         if (!sport->devdata)
1875                 /* no device tree device */
1876                 return 1;
1877
1878         ret = of_alias_get_id(np, "serial");
1879         if (ret < 0) {
1880                 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
1881                 return ret;
1882         }
1883         sport->port.line = ret;
1884
1885         if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1886                 sport->have_rtscts = 1;
1887
1888         if (of_get_property(np, "fsl,dte-mode", NULL))
1889                 sport->dte_mode = 1;
1890
1891         return 0;
1892 }
1893 #else
1894 static inline int serial_imx_probe_dt(struct imx_port *sport,
1895                 struct platform_device *pdev)
1896 {
1897         return 1;
1898 }
1899 #endif
1900
1901 static void serial_imx_probe_pdata(struct imx_port *sport,
1902                 struct platform_device *pdev)
1903 {
1904         struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
1905
1906         sport->port.line = pdev->id;
1907         sport->devdata = (struct imx_uart_data  *) pdev->id_entry->driver_data;
1908
1909         if (!pdata)
1910                 return;
1911
1912         if (pdata->flags & IMXUART_HAVE_RTSCTS)
1913                 sport->have_rtscts = 1;
1914 }
1915
1916 static int serial_imx_probe(struct platform_device *pdev)
1917 {
1918         struct imx_port *sport;
1919         void __iomem *base;
1920         int ret = 0, reg;
1921         struct resource *res;
1922         int txirq, rxirq, rtsirq;
1923
1924         sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
1925         if (!sport)
1926                 return -ENOMEM;
1927
1928         ret = serial_imx_probe_dt(sport, pdev);
1929         if (ret > 0)
1930                 serial_imx_probe_pdata(sport, pdev);
1931         else if (ret < 0)
1932                 return ret;
1933
1934         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1935         base = devm_ioremap_resource(&pdev->dev, res);
1936         if (IS_ERR(base))
1937                 return PTR_ERR(base);
1938
1939         rxirq = platform_get_irq(pdev, 0);
1940         txirq = platform_get_irq(pdev, 1);
1941         rtsirq = platform_get_irq(pdev, 2);
1942
1943         sport->port.dev = &pdev->dev;
1944         sport->port.mapbase = res->start;
1945         sport->port.membase = base;
1946         sport->port.type = PORT_IMX;
1947         sport->port.iotype = UPIO_MEM;
1948         sport->port.irq = rxirq;
1949         sport->port.fifosize = 32;
1950         sport->port.ops = &imx_pops;
1951         sport->port.rs485_config = imx_rs485_config;
1952         sport->port.rs485.flags =
1953                 SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
1954         sport->port.flags = UPF_BOOT_AUTOCONF;
1955         init_timer(&sport->timer);
1956         sport->timer.function = imx_timeout;
1957         sport->timer.data     = (unsigned long)sport;
1958
1959         sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1960         if (IS_ERR(sport->clk_ipg)) {
1961                 ret = PTR_ERR(sport->clk_ipg);
1962                 dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
1963                 return ret;
1964         }
1965
1966         sport->clk_per = devm_clk_get(&pdev->dev, "per");
1967         if (IS_ERR(sport->clk_per)) {
1968                 ret = PTR_ERR(sport->clk_per);
1969                 dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
1970                 return ret;
1971         }
1972
1973         sport->port.uartclk = clk_get_rate(sport->clk_per);
1974
1975         /* For register access, we only need to enable the ipg clock. */
1976         ret = clk_prepare_enable(sport->clk_ipg);
1977         if (ret)
1978                 return ret;
1979
1980         /* Disable interrupts before requesting them */
1981         reg = readl_relaxed(sport->port.membase + UCR1);
1982         reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
1983                  UCR1_TXMPTYEN | UCR1_RTSDEN);
1984         writel_relaxed(reg, sport->port.membase + UCR1);
1985
1986         clk_disable_unprepare(sport->clk_ipg);
1987
1988         /*
1989          * Allocate the IRQ(s). i.MX1 has three interrupts whereas later
1990          * chips only have one interrupt.
1991          */
1992         if (txirq > 0) {
1993                 ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0,
1994                                        dev_name(&pdev->dev), sport);
1995                 if (ret)
1996                         return ret;
1997
1998                 ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0,
1999                                        dev_name(&pdev->dev), sport);
2000                 if (ret)
2001                         return ret;
2002         } else {
2003                 ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
2004                                        dev_name(&pdev->dev), sport);
2005                 if (ret)
2006                         return ret;
2007         }
2008
2009         imx_ports[sport->port.line] = sport;
2010
2011         platform_set_drvdata(pdev, sport);
2012
2013         return uart_add_one_port(&imx_reg, &sport->port);
2014 }
2015
2016 static int serial_imx_remove(struct platform_device *pdev)
2017 {
2018         struct imx_port *sport = platform_get_drvdata(pdev);
2019
2020         return uart_remove_one_port(&imx_reg, &sport->port);
2021 }
2022
2023 static void serial_imx_restore_context(struct imx_port *sport)
2024 {
2025         if (!sport->context_saved)
2026                 return;
2027
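             /*
              * Restore the divider and FIFO setup first, then the control
              * registers; UCR2_SRST is OR'ed back in so the write cannot
              * accidentally put the UART into software reset (the bit is
              * active low).
              */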
2028         writel(sport->saved_reg[4], sport->port.membase + UFCR);
2029         writel(sport->saved_reg[5], sport->port.membase + UESC);
2030         writel(sport->saved_reg[6], sport->port.membase + UTIM);
2031         writel(sport->saved_reg[7], sport->port.membase + UBIR);
2032         writel(sport->saved_reg[8], sport->port.membase + UBMR);
2033         writel(sport->saved_reg[9], sport->port.membase + IMX21_UTS);
2034         writel(sport->saved_reg[0], sport->port.membase + UCR1);
2035         writel(sport->saved_reg[1] | UCR2_SRST, sport->port.membase + UCR2);
2036         writel(sport->saved_reg[2], sport->port.membase + UCR3);
2037         writel(sport->saved_reg[3], sport->port.membase + UCR4);
2038         sport->context_saved = false;
2039 }
2040
2041 static void serial_imx_save_context(struct imx_port *sport)
2042 {
2043         /* Save necessary regs */
2044         sport->saved_reg[0] = readl(sport->port.membase + UCR1);
2045         sport->saved_reg[1] = readl(sport->port.membase + UCR2);
2046         sport->saved_reg[2] = readl(sport->port.membase + UCR3);
2047         sport->saved_reg[3] = readl(sport->port.membase + UCR4);
2048         sport->saved_reg[4] = readl(sport->port.membase + UFCR);
2049         sport->saved_reg[5] = readl(sport->port.membase + UESC);
2050         sport->saved_reg[6] = readl(sport->port.membase + UTIM);
2051         sport->saved_reg[7] = readl(sport->port.membase + UBIR);
2052         sport->saved_reg[8] = readl(sport->port.membase + UBMR);
2053         sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
2054         sport->context_saved = true;
2055 }
2056
2057 static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
2058 {
2059         unsigned int val;
2060
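             /*
              * UCR3_AWAKEN arms the asynchronous WAKE interrupt on RX
              * activity and UCR1_RTSDEN additionally lets an RTS edge
              * generate a wake interrupt while suspended.
              */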
2061         val = readl(sport->port.membase + UCR3);
2062         if (on)
2063                 val |= UCR3_AWAKEN;
2064         else
2065                 val &= ~UCR3_AWAKEN;
2066         writel(val, sport->port.membase + UCR3);
2067
2068         val = readl(sport->port.membase + UCR1);
2069         if (on)
2070                 val |= UCR1_RTSDEN;
2071         else
2072                 val &= ~UCR1_RTSDEN;
2073         writel(val, sport->port.membase + UCR1);
2074 }
2075
2076 static int imx_serial_port_suspend_noirq(struct device *dev)
2077 {
2078         struct platform_device *pdev = to_platform_device(dev);
2079         struct imx_port *sport = platform_get_drvdata(pdev);
2080         int ret;
2081
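             /* Saving the registers only needs the ipg clock enabled. */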
2082         ret = clk_enable(sport->clk_ipg);
2083         if (ret)
2084                 return ret;
2085
2086         serial_imx_save_context(sport);
2087
2088         clk_disable(sport->clk_ipg);
2089
2090         return 0;
2091 }
2092
2093 static int imx_serial_port_resume_noirq(struct device *dev)
2094 {
2095         struct platform_device *pdev = to_platform_device(dev);
2096         struct imx_port *sport = platform_get_drvdata(pdev);
2097         int ret;
2098
2099         ret = clk_enable(sport->clk_ipg);
2100         if (ret)
2101                 return ret;
2102
2103         serial_imx_restore_context(sport);
2104
2105         clk_disable(sport->clk_ipg);
2106
2107         return 0;
2108 }
2109
2110 static int imx_serial_port_suspend(struct device *dev)
2111 {
2112         struct platform_device *pdev = to_platform_device(dev);
2113         struct imx_port *sport = platform_get_drvdata(pdev);
2114
2115         /* enable wakeup from i.MX UART */
2116         serial_imx_enable_wakeup(sport, true);
2117
2118         uart_suspend_port(&imx_reg, &sport->port);
2119
2120         return 0;
2121 }
2122
2123 static int imx_serial_port_resume(struct device *dev)
2124 {
2125         struct platform_device *pdev = to_platform_device(dev);
2126         struct imx_port *sport = platform_get_drvdata(pdev);
2127
2128         /* disable wakeup from i.MX UART */
2129         serial_imx_enable_wakeup(sport, false);
2130
2131         uart_resume_port(&imx_reg, &sport->port);
2132
2133         return 0;
2134 }
2135
2136 static const struct dev_pm_ops imx_serial_port_pm_ops = {
2137         .suspend_noirq = imx_serial_port_suspend_noirq,
2138         .resume_noirq = imx_serial_port_resume_noirq,
2139         .suspend = imx_serial_port_suspend,
2140         .resume = imx_serial_port_resume,
2141 };
2142
2143 static struct platform_driver serial_imx_driver = {
2144         .probe          = serial_imx_probe,
2145         .remove         = serial_imx_remove,
2146
2147         .id_table       = imx_uart_devtype,
2148         .driver         = {
2149                 .name   = "imx-uart",
2150                 .of_match_table = imx_uart_dt_ids,
2151                 .pm     = &imx_serial_port_pm_ops,
2152         },
2153 };
2154
2155 static int __init imx_serial_init(void)
2156 {
2157         int ret = uart_register_driver(&imx_reg);
2158
2159         if (ret)
2160                 return ret;
2161
2162         ret = platform_driver_register(&serial_imx_driver);
2163         if (ret != 0)
2164                 uart_unregister_driver(&imx_reg);
2165
2166         return ret;
2167 }
2168
2169 static void __exit imx_serial_exit(void)
2170 {
2171         platform_driver_unregister(&serial_imx_driver);
2172         uart_unregister_driver(&imx_reg);
2173 }
2174
2175 module_init(imx_serial_init);
2176 module_exit(imx_serial_exit);
2177
2178 MODULE_AUTHOR("Sascha Hauer");
2179 MODULE_DESCRIPTION("IMX generic serial port driver");
2180 MODULE_LICENSE("GPL");
2181 MODULE_ALIAS("platform:imx-uart");