coresight: etb10: splitting sysFS "status" entry
drivers/hwtracing/coresight/coresight-etb10.c
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include <asm/local.h>

#include "coresight-priv.h"

#define ETB_RAM_DEPTH_REG       0x004
#define ETB_STATUS_REG          0x00c
#define ETB_RAM_READ_DATA_REG   0x010
#define ETB_RAM_READ_POINTER    0x014
#define ETB_RAM_WRITE_POINTER   0x018
#define ETB_TRG                 0x01c
#define ETB_CTL_REG             0x020
#define ETB_RWD_REG             0x024
#define ETB_FFSR                0x300
#define ETB_FFCR                0x304
#define ETB_ITMISCOP0           0xee0
#define ETB_ITTRFLINACK         0xee4
#define ETB_ITTRFLIN            0xee8
#define ETB_ITATBDATA0          0xeeC
#define ETB_ITATBCTR2           0xef0
#define ETB_ITATBCTR1           0xef4
#define ETB_ITATBCTR0           0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL     BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN         BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC         BIT(0)
#define ETB_FFCR_FON_MAN        BIT(6)
#define ETB_FFCR_STOP_FI        BIT(12)
#define ETB_FFCR_STOP_TRIGGER   BIT(13)

#define ETB_FFCR_BIT            6
#define ETB_FFSR_BIT            1
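/* the trace formatter works in frames of four 32-bit words (16 bytes) */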
#define ETB_FRAME_SIZE_WORDS    4

/**
 * struct cs_buffers - keep track of a recording session's specifics
 * @cur:        index of the current buffer
 * @nr_pages:   max number of pages granted to us
 * @offset:     offset within the current buffer
 * @data_size:  how much we collected in this run
 * @lost:       non-zero if the HW buffer wrapped around
 * @snapshot:   is this run in snapshot mode
 * @data_pages: a handle to the ring buffer
 */
struct cs_buffers {
        unsigned int            cur;
        unsigned int            nr_pages;
        unsigned long           offset;
        local_t                 data_size;
        local_t                 lost;
        bool                    snapshot;
        void                    **data_pages;
};

/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:       memory mapped base address for this component.
 * @dev:        the device entity associated to this component.
 * @atclk:      optional clock for the core parts of the ETB.
 * @csdev:      component vitals needed by the framework.
 * @miscdev:    specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:   only one at a time pls.
 * @reading:    synchronise user space access to etb buffer.
 * @mode:       how this ETB is being used (disabled, sysFS or perf).
 * @buf:        area of memory where ETB buffer content gets sent.
 * @buffer_depth: size of @buf, in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct etb_drvdata {
        void __iomem            *base;
        struct device           *dev;
        struct clk              *atclk;
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
        local_t                 reading;
        local_t                 mode;
        u8                      *buf;
        u32                     buffer_depth;
        u32                     trigger_cntr;
};

static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
        u32 depth = 0;

        pm_runtime_get_sync(drvdata->dev);

        /* RO registers don't need locking */
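        /* the RAM depth register reports the buffer size in 32-bit words */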
        depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);

        pm_runtime_put(drvdata->dev);
        return depth;
}

static void etb_enable_hw(struct etb_drvdata *drvdata)
{
        int i;
        u32 depth;

        CS_UNLOCK(drvdata->base);

        depth = drvdata->buffer_depth;
        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* clear entire RAM buffer */
        for (i = 0; i < depth; i++)
                writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* reset read RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
        writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
                       drvdata->base + ETB_FFCR);
        /* ETB trace capture enable */
        writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

        CS_LOCK(drvdata->base);
}

static int etb_enable(struct coresight_device *csdev, u32 mode)
{
        u32 val;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        val = local_cmpxchg(&drvdata->mode,
                            CS_MODE_DISABLED, mode);
        /*
         * When accessing from Perf, a HW buffer can be handled
         * by a single trace entity.  In sysFS mode many tracers
         * can be logging to the same HW buffer.
         */
        if (val == CS_MODE_PERF)
                return -EBUSY;

        /* Nothing to do, the tracer is already enabled. */
        if (val == CS_MODE_SYSFS)
                goto out;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_enable_hw(drvdata);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

out:
        dev_info(drvdata->dev, "ETB enabled\n");
        return 0;
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
        u32 ffcr;

        CS_UNLOCK(drvdata->base);

        ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
        /* stop formatter when a stop has completed */
        ffcr |= ETB_FFCR_STOP_FI;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
        /* manually generate a flush of the system */
        ffcr |= ETB_FFCR_FON_MAN;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

        if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n",
                        ETB_FFCR);
        }

        /* disable trace capture */
        writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

        if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n",
                        ETB_FFSR);
        }

        CS_LOCK(drvdata->base);
}

static void etb_dump_hw(struct etb_drvdata *drvdata)
{
        int i;
        u8 *buf_ptr;
        u32 read_data, depth;
        u32 read_ptr, write_ptr;
        u32 frame_off, frame_endoff;

        CS_UNLOCK(drvdata->base);

        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
        frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
        if (frame_off) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);
                dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
                        (unsigned long)frame_off, (unsigned long)frame_endoff);
                write_ptr += frame_endoff;
        }

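        /*
         * If the RAM-full bit is set the buffer has wrapped around and the
         * oldest data sits at the write pointer; otherwise valid data
         * starts at the bottom of the RAM.
         */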
        if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
                      & ETB_STATUS_RAM_FULL) == 0)
                writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        else
                writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        depth = drvdata->buffer_depth;
        buf_ptr = drvdata->buf;
        for (i = 0; i < depth; i++) {
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                *buf_ptr++ = read_data >> 0;
                *buf_ptr++ = read_data >> 8;
                *buf_ptr++ = read_data >> 16;
                *buf_ptr++ = read_data >> 24;
        }

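        /*
         * If the write pointer wasn't frame aligned, blank out the partial
         * last frame so that decoders only ever see complete frames.
         */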
        if (frame_off) {
                buf_ptr -= (frame_endoff * 4);
                for (i = 0; i < frame_endoff; i++) {
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                }
        }

        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        CS_LOCK(drvdata->base);
}

static void etb_disable(struct coresight_device *csdev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        local_set(&drvdata->mode, CS_MODE_DISABLED);

        dev_info(drvdata->dev, "ETB disabled\n");
}

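/*
 * Perf-mode buffer bookkeeping is allocated on the NUMA node of the CPU
 * being traced so that accesses from the tracing path stay local.
 */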
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
                              void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void etb_free_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle,
                          void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = sink_config;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

static unsigned long etb_reset_buffer(struct coresight_device *csdev,
                                      struct perf_output_handle *handle,
                                      void *sink_config, bool *lost)
{
        unsigned long size = 0;
        struct cs_buffers *buf = sink_config;

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head.  The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->snapshot)
                        handle->head = local_xchg(&buf->data_size,
                                                  buf->nr_pages << PAGE_SHIFT);

                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way.  Nobody else can use
                 * this cs_buffers instance until we are done.  As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                *lost = !!local_xchg(&buf->lost, 0);
                size = local_xchg(&buf->data_size, 0);
        }

        return size;
}

static void etb_update_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        int i, cur;
        u8 *buf_ptr;
        u32 read_ptr, write_ptr, capacity;
        u32 status, read_data, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

        CS_UNLOCK(drvdata->base);
        etb_disable_hw(drvdata);

        /* unit is in words, not bytes */
        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * Entries should be aligned to the frame size.  If they are not
         * go back to the last alignment point to give decoding tools a
         * chance to fix things.
         */
        if (write_ptr % ETB_FRAME_SIZE_WORDS) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);

                write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
                local_inc(&buf->lost);
        }

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.  Otherwise
         * start at the beginning and go until the write pointer has
         * been reached.
         */
        status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
        if (status & ETB_STATUS_RAM_FULL) {
                local_inc(&buf->lost);
                to_read = capacity;
                read_ptr = write_ptr;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
                to_read *= ETB_FRAME_SIZE_WORDS;
        }

        /*
         * Make sure we don't overwrite data that hasn't been consumed yet.
         * It is entirely possible that the HW buffer has more data than the
         * ring buffer can currently handle.  If so adjust the start address
         * to take only the last traces.
         *
         * In snapshot mode we are looking to get the latest traces only and as
         * such, we don't care about not overwriting data that hasn't been
         * processed by user space.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

                /* The new read pointer must be frame size aligned */
                to_read = handle->size & mask;
                /*
                 * Move the RAM read pointer up, keeping in mind that
                 * everything is in frame size units.
                 */
                read_ptr = (write_ptr + drvdata->buffer_depth) -
                                        to_read / ETB_FRAME_SIZE_WORDS;
                /* Wrap around if need be */
                if (read_ptr > (drvdata->buffer_depth - 1))
                        read_ptr -= drvdata->buffer_depth;
                /* let the decoder know we've skipped ahead */
                local_inc(&buf->lost);
        }

        /* finally tell HW where we want to start reading from */
        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        cur = buf->cur;
        offset = buf->offset;
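        /*
         * Drain the RAM one 32-bit word at a time, scattering the bytes
         * (LSB first) into the perf ring buffer pages.
         */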
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                *buf_ptr++ = read_data >> 0;
                *buf_ptr++ = read_data >> 8;
                *buf_ptr++ = read_data >> 16;
                *buf_ptr++ = read_data >> 24;

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /* reset ETB buffer for next run */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head.  In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        etb_enable_hw(drvdata);
        CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
        .alloc_buffer   = etb_alloc_buffer,
        .free_buffer    = etb_free_buffer,
        .set_buffer     = etb_set_buffer,
        .reset_buffer   = etb_reset_buffer,
        .update_buffer  = etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
        .sink_ops       = &etb_sink_ops,
};

static void etb_dump(struct etb_drvdata *drvdata)
{
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                etb_enable_hw(drvdata);
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "ETB dumped\n");
}

static int etb_open(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;

        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
        return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
                                size_t len, loff_t *ppos)
{
        u32 depth;
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        etb_dump(drvdata);

        depth = drvdata->buffer_depth;
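        /* trim the request so we never read past the end of the dump (depth words of 4 bytes) */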
        if (*ppos + len > depth * 4)
                len = depth * 4 - *ppos;

        if (copy_to_user(data, drvdata->buf + *ppos, len)) {
                dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += len;

        dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
                __func__, len, (int)(depth * 4 - *ppos));
        return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
        local_set(&drvdata->reading, 0);

        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
}

static const struct file_operations etb_fops = {
        .owner          = THIS_MODULE,
        .open           = etb_open,
        .read           = etb_read,
        .release        = etb_release,
        .llseek         = no_llseek,
};

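/*
 * Raw management registers, exposed read-only through the "mgmt" sysFS
 * group.  These individual files replace the former monolithic "status"
 * entry.
 */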
#define coresight_etb10_simple_func(name, offset)                       \
        coresight_simple_func(struct etb_drvdata, name, offset)

coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_simple_func(sts, ETB_STATUS_REG);
coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_simple_func(trg, ETB_TRG);
coresight_etb10_simple_func(ctl, ETB_CTL_REG);
coresight_etb10_simple_func(ffsr, ETB_FFSR);
coresight_etb10_simple_func(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
        &dev_attr_rdp.attr,
        &dev_attr_sts.attr,
        &dev_attr_rrp.attr,
        &dev_attr_rwp.attr,
        &dev_attr_trg.attr,
        &dev_attr_ctl.attr,
        &dev_attr_ffsr.attr,
        &dev_attr_ffcr.attr,
        NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        NULL,
};

static const struct attribute_group coresight_etb_group = {
        .attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
        .attrs = coresight_etb_mgmt_attrs,
        .name = "mgmt",
};

static const struct attribute_group *coresight_etb_groups[] = {
        &coresight_etb_group,
        &coresight_etb_mgmt_group,
        NULL,
};

static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etb_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc *desc;
        struct device_node *np = adev->dev.of_node;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
                adev->dev.platform_data = pdata;
        }

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->dev = &adev->dev;
        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
        if (!IS_ERR(drvdata->atclk)) {
                ret = clk_prepare_enable(drvdata->atclk);
                if (ret)
                        return ret;
        }
        dev_set_drvdata(dev, drvdata);

        /* validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
        pm_runtime_put(&adev->dev);

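        /* a RAM depth with the top bit set is treated as a bogus readout */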
        if (drvdata->buffer_depth & 0x80000000)
                return -EINVAL;

        drvdata->buf = devm_kzalloc(dev,
                                    drvdata->buffer_depth * 4, GFP_KERNEL);
        if (!drvdata->buf) {
                dev_err(dev, "Failed to allocate %u bytes for buffer data\n",
                        drvdata->buffer_depth * 4);
                return -ENOMEM;
        }

        desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        desc->type = CORESIGHT_DEV_TYPE_SINK;
        desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
        desc->ops = &etb_cs_ops;
        desc->pdata = pdata;
        desc->dev = dev;
        desc->groups = coresight_etb_groups;
        drvdata->csdev = coresight_register(desc);
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);

        drvdata->miscdev.name = pdata->name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &etb_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret)
                goto err_misc_register;

        dev_info(dev, "ETB initialized\n");
        return 0;

err_misc_register:
        coresight_unregister(drvdata->csdev);
        return ret;
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_disable_unprepare(drvdata->atclk);

        return 0;
}

static int etb_runtime_resume(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_prepare_enable(drvdata->atclk);

        return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static struct amba_id etb_ids[] = {
        {
                .id     = 0x0003b907,
                .mask   = 0x0003ffff,
        },
        { 0, 0},
};

static struct amba_driver etb_driver = {
        .drv = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
                .suppress_bind_attrs = true,
        },
        .probe          = etb_probe,
        .id_table       = etb_ids,
};
builtin_amba_driver(etb_driver);