/* drivers/input/rmi4/rmi_spi.c - SPI transport for Synaptics RMI4 devices */
1 /*
2  * Copyright (c) 2011-2016 Synaptics Incorporated
3  * Copyright (c) 2011 Unixphere
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/rmi.h>
13 #include <linux/slab.h>
14 #include <linux/spi/spi.h>
15 #include <linux/irq.h>
16 #include <linux/of.h>
17 #include "rmi_driver.h"
18
/* Initial size (bytes) of each of the DMA-safe rx/tx transfer buffers. */
#define RMI_SPI_DEFAULT_XFER_BUF_SIZE   64

/* Every 256-register page exposes the page select register at offset 0xFF. */
#define RMI_PAGE_SELECT_REGISTER        0x00FF
/*
 * Page portion of a 16-bit RMI register address.
 * NOTE(review): masks with 0x80 rather than 0xFF (as the I2C transport
 * does) — presumably only the MSB is significant over SPI; confirm
 * against the RMI4 SPI protocol spec before changing.
 */
#define RMI_SPI_PAGE(addr)              (((addr) >> 8) & 0x80)
/* Hard upper bound on a single transfer; rmi_spi_xfer() rejects more. */
#define RMI_SPI_XFER_SIZE_LIMIT         255

/* NOTE(review): appears unused in this file — candidate for removal. */
#define BUFFER_SIZE_INCREMENT 32
26
/*
 * SPI command opcodes. The plain WRITE/READ ops use a 2-byte command
 * header; the V2 ops use a 4-byte header (see rmi_spi_xfer()).
 */
enum rmi_spi_op {
        RMI_SPI_WRITE = 0,
        RMI_SPI_READ,
        RMI_SPI_V2_READ_UNIFIED,
        RMI_SPI_V2_READ_SPLIT,
        RMI_SPI_V2_WRITE,
};
34
/* One SPI protocol command: what to do and at which 16-bit RMI address. */
struct rmi_spi_cmd {
        enum rmi_spi_op op;     /* opcode selecting the command header */
        u16 addr;               /* target RMI register address */
};
39
/* Per-device state for the SPI transport. */
struct rmi_spi_xport {
        struct rmi_transport_dev xport; /* generic RMI transport handle */
        struct spi_device *spi;

        struct mutex page_mutex;        /* serializes page select + xfer */
        int page;                       /* cached current page select value */

        int irq;                        /* attention IRQ (from spi->irq) */

        /* rx_buf and tx_buf are halves of one DMA-safe allocation. */
        u8 *rx_buf;
        u8 *tx_buf;
        int xfer_buf_size;              /* size of each half, in bytes */

        /*
         * Pools of spi_transfer structs; sized per-byte when the platform
         * data requests inter-byte delays, otherwise a single transfer.
         */
        struct spi_transfer *rx_xfers;
        struct spi_transfer *tx_xfers;
        int rx_xfer_count;
        int tx_xfer_count;
};
58
/*
 * rmi_spi_manage_pools - (re)allocate the transfer buffers and the
 * spi_transfer pools so that a transfer of @len bytes fits.
 *
 * Buffer size grows by doubling from the current (or default) size and is
 * capped at RMI_SPI_XFER_SIZE_LIMIT. The old buffers are freed only after
 * the new ones are successfully allocated, so on failure the previous
 * buffers remain valid and usable.
 *
 * Returns zero on success or -ENOMEM on allocation failure.
 */
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
        struct spi_device *spi = rmi_spi->spi;
        int buf_size = rmi_spi->xfer_buf_size
                ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
        struct spi_transfer *xfer_buf;
        void *buf;
        void *tmp;

        while (buf_size < len)
                buf_size *= 2;

        if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
                buf_size = RMI_SPI_XFER_SIZE_LIMIT;

        /*
         * One allocation holds both halves: rx in the first buf_size bytes,
         * tx in the second. GFP_DMA keeps it usable by DMA-based SPI
         * controllers — NOTE(review): confirm this is still required on
         * the targeted platforms.
         */
        tmp = rmi_spi->rx_buf;
        buf = devm_kzalloc(&spi->dev, buf_size * 2,
                                GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        rmi_spi->rx_buf = buf;
        rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
        rmi_spi->xfer_buf_size = buf_size;

        /* Free the old buffer only after the new one is in place. */
        if (tmp)
                devm_kfree(&spi->dev, tmp);

        /* Per-byte transfers are needed only when delays are requested. */
        if (rmi_spi->xport.pdata.spi_data.read_delay_us)
                rmi_spi->rx_xfer_count = buf_size;
        else
                rmi_spi->rx_xfer_count = 1;

        if (rmi_spi->xport.pdata.spi_data.write_delay_us)
                rmi_spi->tx_xfer_count = buf_size;
        else
                rmi_spi->tx_xfer_count = 1;

        /*
         * Allocate a pool of spi_transfer buffers for devices which need
         * per byte delays.
         */
        tmp = rmi_spi->rx_xfers;
        xfer_buf = devm_kzalloc(&spi->dev,
                (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
                * sizeof(struct spi_transfer), GFP_KERNEL);
        if (!xfer_buf)
                return -ENOMEM;

        rmi_spi->rx_xfers = xfer_buf;
        rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        return 0;
}
116
117 static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
118                         const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
119                         int tx_len, u8 *rx_buf, int rx_len)
120 {
121         struct spi_device *spi = rmi_spi->spi;
122         struct rmi_device_platform_data_spi *spi_data =
123                                         &rmi_spi->xport.pdata.spi_data;
124         struct spi_message msg;
125         struct spi_transfer *xfer;
126         int ret = 0;
127         int len;
128         int cmd_len = 0;
129         int total_tx_len;
130         int i;
131         u16 addr = cmd->addr;
132
133         spi_message_init(&msg);
134
135         switch (cmd->op) {
136         case RMI_SPI_WRITE:
137         case RMI_SPI_READ:
138                 cmd_len += 2;
139                 break;
140         case RMI_SPI_V2_READ_UNIFIED:
141         case RMI_SPI_V2_READ_SPLIT:
142         case RMI_SPI_V2_WRITE:
143                 cmd_len += 4;
144                 break;
145         }
146
147         total_tx_len = cmd_len + tx_len;
148         len = max(total_tx_len, rx_len);
149
150         if (len > RMI_SPI_XFER_SIZE_LIMIT)
151                 return -EINVAL;
152
153         if (rmi_spi->xfer_buf_size < len)
154                 rmi_spi_manage_pools(rmi_spi, len);
155
156         if (addr == 0)
157                 /*
158                  * SPI needs an address. Use 0x7FF if we want to keep
159                  * reading from the last position of the register pointer.
160                  */
161                 addr = 0x7FF;
162
163         switch (cmd->op) {
164         case RMI_SPI_WRITE:
165                 rmi_spi->tx_buf[0] = (addr >> 8);
166                 rmi_spi->tx_buf[1] = addr & 0xFF;
167                 break;
168         case RMI_SPI_READ:
169                 rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
170                 rmi_spi->tx_buf[1] = addr & 0xFF;
171                 break;
172         case RMI_SPI_V2_READ_UNIFIED:
173                 break;
174         case RMI_SPI_V2_READ_SPLIT:
175                 break;
176         case RMI_SPI_V2_WRITE:
177                 rmi_spi->tx_buf[0] = 0x40;
178                 rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
179                 rmi_spi->tx_buf[2] = addr & 0xFF;
180                 rmi_spi->tx_buf[3] = tx_len;
181                 break;
182         }
183
184         if (tx_buf)
185                 memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);
186
187         if (rmi_spi->tx_xfer_count > 1) {
188                 for (i = 0; i < total_tx_len; i++) {
189                         xfer = &rmi_spi->tx_xfers[i];
190                         memset(xfer, 0, sizeof(struct spi_transfer));
191                         xfer->tx_buf = &rmi_spi->tx_buf[i];
192                         xfer->len = 1;
193                         xfer->delay_usecs = spi_data->write_delay_us;
194                         spi_message_add_tail(xfer, &msg);
195                 }
196         } else {
197                 xfer = rmi_spi->tx_xfers;
198                 memset(xfer, 0, sizeof(struct spi_transfer));
199                 xfer->tx_buf = rmi_spi->tx_buf;
200                 xfer->len = total_tx_len;
201                 spi_message_add_tail(xfer, &msg);
202         }
203
204         rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
205                 __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
206                 total_tx_len, total_tx_len, rmi_spi->tx_buf);
207
208         if (rx_buf) {
209                 if (rmi_spi->rx_xfer_count > 1) {
210                         for (i = 0; i < rx_len; i++) {
211                                 xfer = &rmi_spi->rx_xfers[i];
212                                 memset(xfer, 0, sizeof(struct spi_transfer));
213                                 xfer->rx_buf = &rmi_spi->rx_buf[i];
214                                 xfer->len = 1;
215                                 xfer->delay_usecs = spi_data->read_delay_us;
216                                 spi_message_add_tail(xfer, &msg);
217                         }
218                 } else {
219                         xfer = rmi_spi->rx_xfers;
220                         memset(xfer, 0, sizeof(struct spi_transfer));
221                         xfer->rx_buf = rmi_spi->rx_buf;
222                         xfer->len = rx_len;
223                         spi_message_add_tail(xfer, &msg);
224                 }
225         }
226
227         ret = spi_sync(spi, &msg);
228         if (ret < 0) {
229                 dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
230                 return ret;
231         }
232
233         if (rx_buf) {
234                 memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
235                 rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
236                         __func__, rx_len, rx_len, rx_buf);
237         }
238
239         return 0;
240 }
241
242 /*
243  * rmi_set_page - Set RMI page
244  * @xport: The pointer to the rmi_transport_dev struct
245  * @page: The new page address.
246  *
247  * RMI devices have 16-bit addressing, but some of the transport
248  * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page so that we can reliably
 * address every 256-register page.
251  *
252  * The page_mutex lock must be held when this function is entered.
253  *
254  * Returns zero on success, non-zero on failure.
255  */
256 static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
257 {
258         struct rmi_spi_cmd cmd;
259         int ret;
260
261         cmd.op = RMI_SPI_WRITE;
262         cmd.addr = RMI_PAGE_SELECT_REGISTER;
263
264         ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
265
266         if (ret)
267                 rmi_spi->page = page;
268
269         return ret;
270 }
271
272 static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
273                                const void *buf, size_t len)
274 {
275         struct rmi_spi_xport *rmi_spi =
276                 container_of(xport, struct rmi_spi_xport, xport);
277         struct rmi_spi_cmd cmd;
278         int ret;
279
280         mutex_lock(&rmi_spi->page_mutex);
281
282         if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
283                 ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
284                 if (ret)
285                         goto exit;
286         }
287
288         cmd.op = RMI_SPI_WRITE;
289         cmd.addr = addr;
290
291         ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);
292
293 exit:
294         mutex_unlock(&rmi_spi->page_mutex);
295         return ret;
296 }
297
298 static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
299                               void *buf, size_t len)
300 {
301         struct rmi_spi_xport *rmi_spi =
302                 container_of(xport, struct rmi_spi_xport, xport);
303         struct rmi_spi_cmd cmd;
304         int ret;
305
306         mutex_lock(&rmi_spi->page_mutex);
307
308         if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
309                 ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
310                 if (ret)
311                         goto exit;
312         }
313
314         cmd.op = RMI_SPI_READ;
315         cmd.addr = addr;
316
317         ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);
318
319 exit:
320         mutex_unlock(&rmi_spi->page_mutex);
321         return ret;
322 }
323
/* Transport operations handed to the RMI core via xport.ops. */
static const struct rmi_transport_ops rmi_spi_ops = {
        .write_block    = rmi_spi_write_block,
        .read_block     = rmi_spi_read_block,
};
328
329 static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
330 {
331         struct rmi_spi_xport *rmi_spi = dev_id;
332         struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
333         int ret;
334
335         ret = rmi_process_interrupt_requests(rmi_dev);
336         if (ret)
337                 rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
338                         "Failed to process interrupt request: %d\n", ret);
339
340         return IRQ_HANDLED;
341 }
342
343 static int rmi_spi_init_irq(struct spi_device *spi)
344 {
345         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
346         int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
347         int ret;
348
349         if (!irq_flags)
350                 irq_flags = IRQF_TRIGGER_LOW;
351
352         ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
353                         rmi_spi_irq, irq_flags | IRQF_ONESHOT,
354                         dev_name(&spi->dev), rmi_spi);
355         if (ret < 0) {
356                 dev_warn(&spi->dev, "Failed to register interrupt %d\n",
357                         rmi_spi->irq);
358                 return ret;
359         }
360
361         return 0;
362 }
363
364 #ifdef CONFIG_OF
365 static int rmi_spi_of_probe(struct spi_device *spi,
366                         struct rmi_device_platform_data *pdata)
367 {
368         struct device *dev = &spi->dev;
369         int retval;
370
371         retval = rmi_of_property_read_u32(dev,
372                         &pdata->spi_data.read_delay_us,
373                         "spi-rx-delay-us", 1);
374         if (retval)
375                 return retval;
376
377         retval = rmi_of_property_read_u32(dev,
378                         &pdata->spi_data.write_delay_us,
379                         "spi-tx-delay-us", 1);
380         if (retval)
381                 return retval;
382
383         return 0;
384 }
385
/* Device-tree match table: binds to "syna,rmi4-spi" nodes. */
static const struct of_device_id rmi_spi_of_match[] = {
        { .compatible = "syna,rmi4-spi" },
        {},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
391 #else
/* Without CONFIG_OF there is nothing to probe from the device tree. */
static inline int rmi_spi_of_probe(struct spi_device *spi,
                                struct rmi_device_platform_data *pdata)
{
        return -ENODEV;
}
397 #endif
398
/*
 * Probe: allocate per-device state, gather platform data (DT or legacy
 * platform_data), configure the SPI link, allocate the transfer pools,
 * verify communication by selecting page 0, and register the transport
 * with the RMI core.  All allocations are device-managed, so error
 * paths simply return.
 */
static int rmi_spi_probe(struct spi_device *spi)
{
        struct rmi_spi_xport *rmi_spi;
        struct rmi_device_platform_data *pdata;
        struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
        int retval;

        /* This transport needs full-duplex SPI transfers. */
        if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
                return -EINVAL;

        rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
                        GFP_KERNEL);
        if (!rmi_spi)
                return -ENOMEM;

        pdata = &rmi_spi->xport.pdata;

        /* Device tree takes precedence over legacy platform data. */
        if (spi->dev.of_node) {
                retval = rmi_spi_of_probe(spi, pdata);
                if (retval)
                        return retval;
        } else if (spi_pdata) {
                *pdata = *spi_pdata;
        }

        /* Apply optional overrides of the SPI link parameters. */
        if (pdata->spi_data.bits_per_word)
                spi->bits_per_word = pdata->spi_data.bits_per_word;

        if (pdata->spi_data.mode)
                spi->mode = pdata->spi_data.mode;

        retval = spi_setup(spi);
        if (retval < 0) {
                dev_err(&spi->dev, "spi_setup failed!\n");
                return retval;
        }

        if (spi->irq > 0)
                rmi_spi->irq = spi->irq;

        rmi_spi->spi = spi;
        mutex_init(&rmi_spi->page_mutex);

        rmi_spi->xport.dev = &spi->dev;
        rmi_spi->xport.proto_name = "spi";
        rmi_spi->xport.ops = &rmi_spi_ops;

        /* drvdata must be set before rmi_spi_init_irq() reads it back. */
        spi_set_drvdata(spi, rmi_spi);

        retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
        if (retval)
                return retval;

        /*
         * Setting the page to zero will (a) make sure the PSR is in a
         * known state, and (b) make sure we can talk to the device.
         */
        retval = rmi_set_page(rmi_spi, 0);
        if (retval) {
                dev_err(&spi->dev, "Failed to set page select to 0.\n");
                return retval;
        }

        retval = rmi_register_transport_device(&rmi_spi->xport);
        if (retval) {
                dev_err(&spi->dev, "failed to register transport.\n");
                return retval;
        }

        /*
         * NOTE(review): if this fails, the transport registered above is
         * not unregistered before returning — verify the RMI core cleans
         * up, or add an unwind here.
         */
        retval = rmi_spi_init_irq(spi);
        if (retval < 0)
                return retval;

        dev_info(&spi->dev, "registered RMI SPI driver\n");
        return 0;
}
475
/*
 * Remove: unregister from the RMI core.  Buffers, IRQ, and the state
 * struct are device-managed and freed automatically afterwards.
 */
static int rmi_spi_remove(struct spi_device *spi)
{
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);

        rmi_unregister_transport_device(&rmi_spi->xport);

        return 0;
}
484
485 #ifdef CONFIG_PM_SLEEP
486 static int rmi_spi_suspend(struct device *dev)
487 {
488         struct spi_device *spi = to_spi_device(dev);
489         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
490         int ret;
491
492         ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
493         if (ret)
494                 dev_warn(dev, "Failed to resume device: %d\n", ret);
495
496         disable_irq(rmi_spi->irq);
497         if (device_may_wakeup(&spi->dev)) {
498                 ret = enable_irq_wake(rmi_spi->irq);
499                 if (!ret)
500                         dev_warn(dev, "Failed to enable irq for wake: %d\n",
501                                 ret);
502         }
503         return ret;
504 }
505
506 static int rmi_spi_resume(struct device *dev)
507 {
508         struct spi_device *spi = to_spi_device(dev);
509         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
510         int ret;
511
512         enable_irq(rmi_spi->irq);
513         if (device_may_wakeup(&spi->dev)) {
514                 ret = disable_irq_wake(rmi_spi->irq);
515                 if (!ret)
516                         dev_warn(dev, "Failed to disable irq for wake: %d\n",
517                                 ret);
518         }
519
520         ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
521         if (ret)
522                 dev_warn(dev, "Failed to resume device: %d\n", ret);
523
524         return ret;
525 }
526 #endif
527
528 #ifdef CONFIG_PM
529 static int rmi_spi_runtime_suspend(struct device *dev)
530 {
531         struct spi_device *spi = to_spi_device(dev);
532         struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
533         int ret;
534
535         ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
536         if (ret)
537                 dev_warn(dev, "Failed to resume device: %d\n", ret);
538
539         disable_irq(rmi_spi->irq);
540
541         return 0;
542 }
543
/*
 * Runtime PM resume: unmask the attention IRQ and resume the RMI
 * device.  Always returns 0; a device-side failure is only logged.
 */
static int rmi_spi_runtime_resume(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        enable_irq(rmi_spi->irq);

        ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
        if (ret)
                dev_warn(dev, "Failed to resume device: %d\n", ret);

        return 0;
}
558 #endif
559
/* PM callbacks: system sleep and runtime PM hooks defined above. */
static const struct dev_pm_ops rmi_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
        SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
                           NULL)
};
565
/* Legacy (non-DT) SPI device ID table. */
static const struct spi_device_id rmi_id[] = {
        { "rmi4_spi", 0 },
        { }
};
MODULE_DEVICE_TABLE(spi, rmi_id);
571
/* SPI driver glue: probe/remove, PM ops, and match tables. */
static struct spi_driver rmi_spi_driver = {
        .driver = {
                .name   = "rmi4_spi",
                .pm     = &rmi_spi_pm,
                .of_match_table = of_match_ptr(rmi_spi_of_match),
        },
        .id_table       = rmi_id,
        .probe          = rmi_spi_probe,
        .remove         = rmi_spi_remove,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);