coresight: tmc: Delete an unnecessary check before the function call "kfree"
[cascardo/linux.git] / drivers / hwtracing / coresight / coresight-tmc-etf.c
1 /*
2  * Copyright(C) 2016 Linaro Limited. All rights reserved.
3  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include <linux/circ_buf.h>
19 #include <linux/coresight.h>
20 #include <linux/perf_event.h>
21 #include <linux/slab.h>
22 #include "coresight-priv.h"
23 #include "coresight-tmc.h"
24
/*
 * tmc_etb_enable_hw - program and start the TMC in circular buffer mode.
 *
 * Configures the formatter/flush control register and the trigger
 * counter, then turns the capture engine on.  Callers serialise access
 * to the device (all call sites in this file hold drvdata->spinlock or
 * run before the device is exposed) -- NOTE(review): confirm for any
 * new caller.
 */
void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* Capture into the TMC's internal RAM, wrapping when full */
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Formatter, trigger insertion and flush/stop-on-trigger events */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Program the user-configured trigger counter */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
43
/*
 * tmc_etb_dump_hw - drain the TMC RAM into drvdata->buf.
 *
 * Reads the RAM Read Data register one memory-width burst at a time,
 * accumulating into drvdata->buf and drvdata->len, until the device
 * returns the 0xFFFFFFFF "no more data" marker.
 *
 * NOTE(review): there is no bound check against drvdata->size in this
 * loop; it relies on the hardware returning the empty marker before
 * more than drvdata->size bytes have been produced -- confirm against
 * the TMC TRM.
 */
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data;
	int i;

	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		/* One iteration per 32-bit lane of the memory interface */
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
			drvdata->len += 4;
		}
	}
}
63
/*
 * tmc_etb_disable_hw - flush, optionally dump, and stop the TMC when
 * operating in circular buffer mode.
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
79
/*
 * tmc_etf_enable_hw - program and start the TMC in HW FIFO (link) mode,
 * where trace data flows through rather than being captured in RAM.
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Formatter and trigger insertion only - no flush/stop triggers */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Buffer watermark cleared -- presumably "drain immediately"; see TRM */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
95
/* tmc_etf_disable_hw - flush and stop the TMC when used as a link (FIFO) */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
105
/*
 * tmc_enable_etf_sink_sysfs - enable the sink for a sysFS session.
 *
 * A trace buffer of drvdata->size bytes is allocated lazily the first
 * time the sink is enabled and freed when user space reads it back
 * through the /dev node (see tmc_read_unprepare_etb()).
 *
 * Returns 0 on success, -EBUSY if the buffer is currently being read,
 * -ENOMEM if the buffer allocation fails, -EINVAL on a bogus mode.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
{
	int ret = 0;
	bool used = false;		/* did drvdata take ownership of buf? */
	char *buf = NULL;
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	 /* This shouldn't be happening */
	if (WARN_ON(mode != CS_MODE_SYSFS))
		return -EINVAL;

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	val = local_xchg(&drvdata->mode, mode);
	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (val == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		/* Someone installed a buffer while the lock was dropped */
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");

	return ret;
}
179
180 static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
181 {
182         int ret = 0;
183         long val;
184         unsigned long flags;
185         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
186
187          /* This shouldn't be happening */
188         if (WARN_ON(mode != CS_MODE_PERF))
189                 return -EINVAL;
190
191         spin_lock_irqsave(&drvdata->spinlock, flags);
192         if (drvdata->reading) {
193                 ret = -EINVAL;
194                 goto out;
195         }
196
197         val = local_xchg(&drvdata->mode, mode);
198         /*
199          * In Perf mode there can be only one writer per sink.  There
200          * is also no need to continue if the ETB/ETR is already operated
201          * from sysFS.
202          */
203         if (val != CS_MODE_DISABLED) {
204                 ret = -EINVAL;
205                 goto out;
206         }
207
208         tmc_etb_enable_hw(drvdata);
209 out:
210         spin_unlock_irqrestore(&drvdata->spinlock, flags);
211
212         return ret;
213 }
214
215 static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
216 {
217         switch (mode) {
218         case CS_MODE_SYSFS:
219                 return tmc_enable_etf_sink_sysfs(csdev, mode);
220         case CS_MODE_PERF:
221                 return tmc_enable_etf_sink_perf(csdev, mode);
222         }
223
224         /* We shouldn't be here */
225         return -EINVAL;
226 }
227
/*
 * tmc_disable_etf_sink - stop the sink unless user space is in the
 * middle of reading the trace buffer, in which case the disable is
 * silently skipped (the read path handles re-enabling/teardown).
 */
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
	/* Disable the TMC only if it needs to */
	if (val != CS_MODE_DISABLED)
		tmc_etb_disable_hw(drvdata);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
249
/*
 * tmc_enable_etf_link - enable the ETF as a link (HW FIFO) in a trace
 * path.  Fails with -EBUSY while the buffer is being read.
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_SYSFS);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}
269
/*
 * tmc_disable_etf_link - disable the ETF when used as a link, unless a
 * buffer read is in progress (in which case the request is ignored).
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_DISABLED);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC disabled\n");
}
288
289 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
290                                   void **pages, int nr_pages, bool overwrite)
291 {
292         int node;
293         struct cs_buffers *buf;
294
295         if (cpu == -1)
296                 cpu = smp_processor_id();
297         node = cpu_to_node(cpu);
298
299         /* Allocate memory structure for interaction with Perf */
300         buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
301         if (!buf)
302                 return NULL;
303
304         buf->snapshot = overwrite;
305         buf->nr_pages = nr_pages;
306         buf->data_pages = pages;
307
308         return buf;
309 }
310
/* Release the structure handed out by tmc_alloc_etf_buffer() */
static void tmc_free_etf_buffer(void *config)
{
	/* kfree() tolerates NULL, so no guard is needed */
	kfree(config);
}
317
318 static int tmc_set_etf_buffer(struct coresight_device *csdev,
319                               struct perf_output_handle *handle,
320                               void *sink_config)
321 {
322         int ret = 0;
323         unsigned long head;
324         struct cs_buffers *buf = sink_config;
325
326         /* wrap head around to the amount of space we have */
327         head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
328
329         /* find the page to write to */
330         buf->cur = head / PAGE_SIZE;
331
332         /* and offset within that page */
333         buf->offset = head % PAGE_SIZE;
334
335         local_set(&buf->data_size, 0);
336
337         return ret;
338 }
339
/*
 * tmc_reset_etf_buffer - square off with the perf ring buffer API at
 * the end of a run: report how much data was produced (and whether any
 * was lost), and reset the cs_buffers counters for the next run.
 *
 * Returns the number of bytes collected, or 0 if sink_config is NULL.
 */
static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config, bool *lost)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head.  The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way.  Nobody else can use
		 * this cs_buffers instance until we are done.  As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		*lost = !!local_xchg(&buf->lost, 0);
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}
369
/*
 * tmc_update_etf_buffer - copy the trace data captured in the TMC RAM
 * into the pages of the perf AUX ring buffer.
 *
 * Works out the amount of captured data from the RAM read/write
 * pointers (or the full buffer size if the TMC wrapped), trims it to
 * what fits in the perf handle while respecting the RRP alignment
 * rules, then drains the RAM 32 bits at a time into buf->data_pages.
 * Only valid in CS_MODE_PERF.
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		/* Older data is being dropped to make room */
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* Copy to_read bytes out of the RAM, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
480
/* Sink operations: used when the TMC terminates a trace path */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link operations: an ETF can also sit in the middle of a trace path */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only act as a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act as both a sink and a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
504
/*
 * tmc_read_prepare_etb - get the device ready for buffer reads through
 * the /dev node.
 *
 * Stops an active sysFS trace session (which also dumps the HW buffer
 * into drvdata->buf, see tmc_etb_disable_hw()) so the buffer is stable
 * while user space reads it.
 *
 * Returns 0 on success, -EBUSY if a read is already in progress, and
 * -EINVAL if the TMC is in HW FIFO mode, operated from Perf, has no
 * data left to read, or has an unexpected config type.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	long val;
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	val = local_read(&drvdata->mode);
	/* Don't interfere if operated from Perf */
	if (val == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (val == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
554
/*
 * tmc_read_unprepare_etb - release the device once user space is done
 * reading the trace buffer.
 *
 * If a sysFS session is still active the buffer is zeroed and the TMC
 * re-enabled so tracing resumes without stale data; otherwise the
 * buffer is detached from drvdata and freed.
 *
 * Returns 0 on success, -EINVAL if the TMC is in HW FIFO mode or has
 * an unexpected config type.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}