/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>      /* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
        trace_seq_puts(s, "# compressed entry header\n");
        trace_seq_puts(s, "\ttype_len    :    5 bits\n");
        trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
        trace_seq_puts(s, "\tarray       :   32 bits\n");
        trace_seq_putc(s, '\n');
        trace_seq_printf(s, "\tpadding     : type == %d\n",
                         RINGBUF_TYPE_PADDING);
        trace_seq_printf(s, "\ttime_extend : type == %d\n",
                         RINGBUF_TYPE_TIME_EXTEND);
        trace_seq_printf(s, "\tdata max type_len  == %d\n",
                         RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

        return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
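
/*
 * Editor's note: a hedged usage sketch (not part of the original file)
 * showing how a consumer drives the reader-page swap pictured above,
 * using only the public API from <linux/ring_buffer.h>.
 * RB_EXAMPLE_SKETCHES is a made-up guard that is never defined, so none
 * of these sketches are ever built.
 */
#ifdef RB_EXAMPLE_SKETCHES
static void rb_example_consume(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_event *event;
        unsigned long lost_events;
        u64 ts;

        /* each call may exhaust the reader page and trigger the swap */
        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events)))
                pr_info("event @%llu: %u byte payload\n",
                        (unsigned long long)ts,
                        ring_buffer_event_length(event));
}
#endif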

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF           (1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT            4U
#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT       0
# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT       1
# define RB_ARCH_ALIGNMENT              8U
#endif

#define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
        RB_LEN_TIME_EXTEND = 8,
        RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
        /* padding has a NULL time_delta */
        event->type_len = RINGBUF_TYPE_PADDING;
        event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len)
                length = event->type_len * RB_ALIGNMENT;
        else
                length = event->array[0];
        return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event))
                        /* undefined */
                        return -1;
                return  event->array[0] + RB_EVNT_HDR_SIZE;

        case RINGBUF_TYPE_TIME_EXTEND:
                return RB_LEN_TIME_EXTEND;

        case RINGBUF_TYPE_TIME_STAMP:
                return RB_LEN_TIME_STAMP;

        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
                BUG();
        }
        /* not hit */
        return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
        unsigned len = 0;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
                /* time extends include the data event after it */
                len = RB_LEN_TIME_EXTEND;
                event = skip_time_extend(event);
        }
        return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
        unsigned length;

        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);

        length = rb_event_length(event);
        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
                return length;
        length -= RB_EVNT_HDR_SIZE;
        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
                length -= sizeof(event->array[0]);
        return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
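
/*
 * Editor's note: hedged sketch (not part of the original file) of the
 * event size math implied by the encoding above: small events keep their
 * length in the 5-bit type_len field in RB_ALIGNMENT units, while larger
 * events spend array[0] on an explicit length. Compare with
 * rb_event_data_length(); this helper is illustrative only.
 */
#ifdef RB_EXAMPLE_SKETCHES
static unsigned rb_example_event_size(unsigned payload)
{
        unsigned length = payload ? payload : 1; /* zero length is confusing */

        /* large (or force-aligned) events store their length in array[0] */
        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                length += sizeof(u32);

        return ALIGN(length + RB_EVNT_HDR_SIZE, RB_ARCH_ALIGNMENT);
}
#endif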

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
                event = skip_time_extend(event);
        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
        /* Otherwise length is in array[0] and array[1] has the data */
        return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
        return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)                \
        for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT        27
#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST   (~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS        (1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED        (1 << 30)

struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
        unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
        struct list_head list;          /* list of buffer pages */
        local_t          write;         /* index for next write */
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
        struct buffer_data_page *page;  /* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK           0xfffff
#define RB_WRITE_INTCNT         (1 << 20)
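
/*
 * Editor's note: hedged sketch (not part of the original file) of the
 * split counter described above. The low 20 bits are the write index;
 * the bits above RB_WRITE_MASK count in-flight updaters, so comparing
 * the high bits of two snapshots reveals whether a nested (interrupt)
 * update slipped in between them.
 */
#ifdef RB_EXAMPLE_SKETCHES
static bool rb_example_nested_update(unsigned long before, unsigned long after)
{
        /* updater bits moved => an interrupt updated the page under us */
        return (before & ~RB_WRITE_MASK) != (after & ~RB_WRITE_MASK);
}
#endif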

static void rb_init_page(struct buffer_data_page *bpage)
{
        local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
        return local_read(&((struct buffer_data_page *)page)->commit)
                + BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
        free_page((unsigned long)bpage->page);
        kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
        if (delta & TS_DELTA_TEST)
                return 1;
        return 0;
}
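
/*
 * Editor's note: hedged sketch (not part of the original file) of what a
 * failed test_time_stamp() leads to: the delta is split so the low
 * TS_SHIFT bits ride in the data event while the remainder is carried by
 * a RINGBUF_TYPE_TIME_EXTEND event in front of it.
 */
#ifdef RB_EXAMPLE_SKETCHES
static void rb_example_split_delta(u64 delta, u32 *event_bits, u32 *extend_bits)
{
        if (test_time_stamp(delta)) {
                *event_bits  = (u32)(delta & TS_MASK);   /* low 27 bits */
                *extend_bits = (u32)(delta >> TS_SHIFT); /* in the extend */
        } else {
                *event_bits  = (u32)delta;
                *extend_bits = 0;
        }
}
#endif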

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
        struct buffer_data_page field;

        trace_seq_printf(s, "\tfield: u64 timestamp;\t"
                         "offset:0;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)sizeof(field.time_stamp),
                         (unsigned int)is_signed_type(u64));

        trace_seq_printf(s, "\tfield: local_t commit;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), commit),
                         (unsigned int)sizeof(field.commit),
                         (unsigned int)is_signed_type(long));

        trace_seq_printf(s, "\tfield: int overwrite;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), commit),
                         1,
                         (unsigned int)is_signed_type(long));

        trace_seq_printf(s, "\tfield: char data;\t"
                         "offset:%u;\tsize:%u;\tsigned:%u;\n",
                         (unsigned int)offsetof(typeof(field), data),
                         (unsigned int)BUF_PAGE_SIZE,
                         (unsigned int)is_signed_type(char));

        return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
        struct irq_work                 work;
        wait_queue_head_t               waiters;
        wait_queue_head_t               full_waiters;
        bool                            waiters_pending;
        bool                            full_waiters_pending;
        bool                            wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
        u64                     ts;
        u64                     delta;
        unsigned long           length;
        struct buffer_page      *tail_page;
        int                     add_timestamp;
};

/*
 * Used for which event context the event is in.
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
        RB_CTX_NMI,
        RB_CTX_IRQ,
        RB_CTX_SOFTIRQ,
        RB_CTX_NORMAL,
        RB_CTX_MAX
};

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
        int                             cpu;
        atomic_t                        record_disabled;
        struct ring_buffer              *buffer;
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
        unsigned long                   nr_pages;
        unsigned int                    current_context;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
        struct buffer_page              *commit_page;   /* committed pages */
        struct buffer_page              *reader_page;
        unsigned long                   lost_events;
        unsigned long                   last_overrun;
        local_t                         entries_bytes;
        local_t                         entries;
        local_t                         overrun;
        local_t                         commit_overrun;
        local_t                         dropped_events;
        local_t                         committing;
        local_t                         commits;
        unsigned long                   read;
        unsigned long                   read_bytes;
        u64                             write_stamp;
        u64                             read_stamp;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
        long                            nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
        struct work_struct              update_pages_work;
        struct completion               update_done;

        struct rb_irq_work              irq_work;
};

struct ring_buffer {
        unsigned                        flags;
        int                             cpus;
        atomic_t                        record_disabled;
        atomic_t                        resize_disabled;
        cpumask_var_t                   cpumask;

        struct lock_class_key           *reader_lock_key;

        struct mutex                    mutex;

        struct ring_buffer_per_cpu      **buffers;

#ifdef CONFIG_HOTPLUG_CPU
        struct notifier_block           cpu_notify;
#endif
        u64                             (*clock)(void);

        struct rb_irq_work              irq_work;
};

struct ring_buffer_iter {
        struct ring_buffer_per_cpu      *cpu_buffer;
        unsigned long                   head;
        struct buffer_page              *head_page;
        struct buffer_page              *cache_reader_page;
        unsigned long                   cache_read;
        u64                             read_stamp;
};

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
        struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

        wake_up_all(&rbwork->waiters);
        if (rbwork->wakeup_full) {
                rbwork->wakeup_full = false;
                wake_up_all(&rbwork->full_waiters);
        }
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
        struct rb_irq_work *work;
        int ret = 0;

        /*
         * Depending on what the caller is waiting for, either any
         * data in any cpu buffer, or a specific buffer, put the
         * caller on the appropriate wait queue.
         */
        if (cpu == RING_BUFFER_ALL_CPUS) {
                work = &buffer->irq_work;
                /* Full only makes sense on per cpu reads */
                full = false;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return -ENODEV;
                cpu_buffer = buffer->buffers[cpu];
                work = &cpu_buffer->irq_work;
        }


        while (true) {
                if (full)
                        prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
                else
                        prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

                /*
                 * The events can happen in critical sections where
                 * checking a work queue can cause deadlocks.
                 * After adding a task to the queue, this flag is set
                 * only to notify events to try to wake up the queue
                 * using irq_work.
                 *
                 * We don't clear it even if the buffer is no longer
                 * empty. The flag only causes the next event to run
                 * irq_work to do the work queue wake up. The worst
                 * that can happen if we race with !trace_empty() is that
                 * an event will cause an irq_work to try to wake up
                 * an empty queue.
                 *
                 * There's no reason to protect this flag either, as
                 * the work queue and irq_work logic will do the necessary
                 * synchronization for the wake ups. The only thing
                 * that is necessary is that the wake up happens after
                 * a task has been queued. It's OK for spurious wake ups.
                 */
                if (full)
                        work->full_waiters_pending = true;
                else
                        work->waiters_pending = true;

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
                        break;

                if (cpu != RING_BUFFER_ALL_CPUS &&
                    !ring_buffer_empty_cpu(buffer, cpu)) {
                        unsigned long flags;
                        bool pagebusy;

                        if (!full)
                                break;

                        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
                        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

                        if (!pagebusy)
                                break;
                }

                schedule();
        }

        if (full)
                finish_wait(&work->full_waiters, &wait);
        else
                finish_wait(&work->waiters, &wait);

        return ret;
}
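
/*
 * Editor's note: hedged usage sketch (not part of the original file) of
 * ring_buffer_wait() from a reader thread: block until the chosen cpu
 * buffer has data, bailing out on signals or a missing cpu.
 */
#ifdef RB_EXAMPLE_SKETCHES
static int rb_example_wait_for_data(struct ring_buffer *buffer, int cpu)
{
        /* returns 0 once woken; -EINTR on signal, -ENODEV for a bad cpu */
        int ret = ring_buffer_wait(buffer, cpu, false);

        if (ret)
                return ret;

        /* data should now be available; go consume it */
        return 0;
}
#endif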

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
                          struct file *filp, poll_table *poll_table)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;

        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return -EINVAL;

                cpu_buffer = buffer->buffers[cpu];
                work = &cpu_buffer->irq_work;
        }

        poll_wait(filp, &work->waiters, poll_table);
        work->waiters_pending = true;
        /*
         * There's a tight race between setting the waiters_pending and
         * checking if the ring buffer is empty.  Once the waiters_pending bit
         * is set, the next event will wake the task up, but we can get stuck
         * if there's only a single event in.
         *
         * FIXME: Ideally, we need a memory barrier on the writer side as well,
         * but adding a memory barrier to all events will cause too much of a
         * performance hit in the fast path.  We only need a memory barrier when
         * the buffer goes from empty to having content.  But as this race is
         * extremely small, and it's not a problem if another event comes in, we
         * will fix it later.
         */
        smp_mb();

        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
                return POLLIN | POLLRDNORM;
        return 0;
}
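
/*
 * Editor's note: hedged sketch (not part of the original file) of wiring
 * ring_buffer_poll_wait() into a file_operations ->poll handler; a real
 * driver would typically fetch the buffer and cpu from
 * filp->private_data rather than take them as arguments.
 */
#ifdef RB_EXAMPLE_SKETCHES
static int rb_example_poll(struct ring_buffer *buffer, int cpu,
                           struct file *filp, poll_table *pt)
{
        /* POLLIN | POLLRDNORM when data is ready, 0 when it is not */
        return ring_buffer_poll_wait(buffer, cpu, filp, pt);
}
#endif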

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)                                             \
        ({                                                              \
                int _____ret = unlikely(cond);                          \
                if (_____ret) {                                         \
                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
                                struct ring_buffer_per_cpu *__b =       \
                                        (void *)b;                      \
                                atomic_inc(&__b->buffer->record_disabled); \
                        } else                                          \
                                atomic_inc(&b->record_disabled);        \
                        WARN_ON(1);                                     \
                }                                                       \
                _____ret;                                               \
        })

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
        /* shift to debug/test normalization and TIME_EXTENTS */
        return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
        u64 time;

        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
        preempt_enable_no_resched_notrace();

        return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
                                      int cpu, u64 *ts)
{
        /* Just stupid testing the normalize function and deltas */
        *ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL          0UL
#define RB_PAGE_HEAD            1UL
#define RB_PAGE_UPDATE          2UL


#define RB_FLAG_MASK            3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED           4UL
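
/*
 * Editor's note: hedged sketch (not part of the original file) of the
 * pointer tagging just described. Buffer pages are cache-line aligned,
 * so the two low bits of a ->next pointer are free to carry the flags;
 * the real accessors (rb_list_head() and friends) follow below.
 */
#ifdef RB_EXAMPLE_SKETCHES
static unsigned long rb_example_flags(struct list_head *next)
{
        /* the flag bits ride in the low bits of the pointer */
        return (unsigned long)next & RB_FLAG_MASK;
}

static struct list_head *rb_example_tag(struct list_head *next,
                                        unsigned long flag)
{
        /* strip any old flag, then stamp on the new one */
        return (struct list_head *)
               (((unsigned long)next & ~RB_FLAG_MASK) | flag);
}
#endif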

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
        unsigned long val = (unsigned long)list;

        return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                struct buffer_page *page, struct list_head *list)
{
        unsigned long val;

        val = (unsigned long)list->next;

        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
                return RB_PAGE_MOVED;

        return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
        struct list_head *list = page->list.prev;

        return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
                                struct list_head *list)
{
        unsigned long *ptr;

        ptr = (unsigned long *)&list->next;
        *ptr |= RB_PAGE_HEAD;
        *ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;

        head = cpu_buffer->head_page;
        if (!head)
                return;

        /*
         * Set the previous list pointer to have the HEAD flag.
         */
        rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
        unsigned long *ptr = (unsigned long *)&list->next;

        *ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *hd;

        /* Go through the whole list and clear any pointers found. */
        rb_list_head_clear(cpu_buffer->pages);

        list_for_each(hd, cpu_buffer->pages)
                rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
                            struct buffer_page *head,
                            struct buffer_page *prev,
                            int old_flag, int new_flag)
{
        struct list_head *list;
        unsigned long val = (unsigned long)&head->list;
        unsigned long ret;

        list = &prev->list;

        val &= ~RB_FLAG_MASK;

        ret = cmpxchg((unsigned long *)&list->next,
                      val | old_flag, val | new_flag);

        /* check if the reader took the page */
        if ((ret & ~RB_FLAG_MASK) != val)
                return RB_PAGE_MOVED;

        return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
                                 struct buffer_page *head,
                                 struct buffer_page *prev,
                                 int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
                                   struct buffer_page *head,
                                   struct buffer_page *prev,
                                   int old_flag)
{
        return rb_head_page_set(cpu_buffer, head, prev,
                                old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page **bpage)
{
        struct list_head *p = rb_list_head((*bpage)->list.next);

        *bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct buffer_page *head;
        struct buffer_page *page;
        struct list_head *list;
        int i;

        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
                return NULL;

        /* sanity check */
        list = cpu_buffer->pages;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
                return NULL;

        page = head = cpu_buffer->head_page;
        /*
         * It is possible that the writer moves the header behind
         * where we started, and we miss in one loop.
         * A second loop should grab the header, but we'll do
         * three loops just because I'm paranoid.
         */
        for (i = 0; i < 3; i++) {
                do {
                        if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
                                cpu_buffer->head_page = page;
                                return page;
                        }
                        rb_inc_page(cpu_buffer, &page);
                } while (page != head);
        }

        RB_WARN_ON(cpu_buffer, 1);

        return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
                                struct buffer_page *new)
{
        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
        unsigned long val;
        unsigned long ret;

        val = *ptr & ~RB_FLAG_MASK;
        val |= RB_PAGE_HEAD;

        ret = cmpxchg(ptr, val, (unsigned long)&new->list);

        return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                               struct buffer_page *tail_page,
                               struct buffer_page *next_page)
{
        unsigned long old_entries;
        unsigned long old_write;

        /*
         * The tail page now needs to be moved forward.
         *
         * We need to reset the tail page, but without messing
         * with possible erasing of data brought in by interrupts
         * that have moved the tail page and are currently on it.
         *
         * We add a counter to the write field to denote this.
         */
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
         */
        barrier();

        /*
         * If the tail page is still the same as what we think
         * it is, then it is up to us to update the tail
         * pointer.
         */
        if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
                /* Zero the write counter */
                unsigned long val = old_write & ~RB_WRITE_MASK;
                unsigned long eval = old_entries & ~RB_WRITE_MASK;

                /*
                 * This will only succeed if an interrupt did
                 * not come in and change it. In which case, we
                 * do not want to modify it.
                 *
                 * We add (void) to let the compiler know that we do not care
                 * about the return value of these functions. We use the
                 * cmpxchg to only update if an interrupt did not already
                 * do it for us. If the cmpxchg fails, we don't care.
                 */
                (void)local_cmpxchg(&next_page->write, old_write, val);
                (void)local_cmpxchg(&next_page->entries, old_entries, eval);

                /*
                 * No need to worry about races with clearing out the commit.
                 * It can only increment when a commit takes place. But that
                 * only happens in the outer most nested commit.
                 */
                local_set(&next_page->page->commit, 0);

                /* Again, either we update tail_page or an interrupt does */
                (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
        }
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
                          struct buffer_page *bpage)
{
        unsigned long val = (unsigned long)bpage;

        if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
                return 1;

        return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
                         struct list_head *list)
{
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
                return 1;
        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
                return 1;
        return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        /* Reset the head page if it exists */
        if (cpu_buffer->head_page)
                rb_set_head_page(cpu_buffer);

        rb_head_page_deactivate(cpu_buffer);

        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
                return -1;
        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
                return -1;

        if (rb_check_list(cpu_buffer, head))
                return -1;

        list_for_each_entry_safe(bpage, tmp, head, list) {
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.next->prev != &bpage->list))
                        return -1;
                if (RB_WARN_ON(cpu_buffer,
                               bpage->list.prev->next != &bpage->list))
                        return -1;
                if (rb_check_list(cpu_buffer, &bpage->list))
                        return -1;
        }

        rb_head_page_activate(cpu_buffer);

        return 0;
}

static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
        struct buffer_page *bpage, *tmp;
        long i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;
                /*
                 * __GFP_NORETRY flag makes sure that the allocation fails
                 * gracefully without invoking oom-killer and the system is
                 * not destabilized.
                 */
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                                    GFP_KERNEL | __GFP_NORETRY,
                                    cpu_to_node(cpu));
                if (!bpage)
                        goto free_pages;

                list_add(&bpage->list, pages);

                page = alloc_pages_node(cpu_to_node(cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
                rb_init_page(bpage->page);
        }

        return 0;

free_pages:
        list_for_each_entry_safe(bpage, tmp, pages, list) {
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }

        return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned long nr_pages)
{
        LIST_HEAD(pages);

        WARN_ON(!nr_pages);

        if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
                return -ENOMEM;

        /*
         * The ring buffer page list is a circular list that does not
         * start and end with a list head. All page list items point to
         * other pages.
         */
        cpu_buffer->pages = pages.next;
        list_del(&pages);

        cpu_buffer->nr_pages = nr_pages;

        rb_check_pages(cpu_buffer);

        return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
        struct page *page;
        int ret;

        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
                                  GFP_KERNEL, cpu_to_node(cpu));
        if (!cpu_buffer)
                return NULL;

        cpu_buffer->cpu = cpu;
        cpu_buffer->buffer = buffer;
        raw_spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
        init_completion(&cpu_buffer->update_done);
        init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
        init_waitqueue_head(&cpu_buffer->irq_work.waiters);
        init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
        if (!bpage)
                goto fail_free_buffer;

        rb_check_bpage(cpu_buffer, bpage);

        cpu_buffer->reader_page = bpage;
        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
        rb_init_page(bpage->page);

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        INIT_LIST_HEAD(&cpu_buffer->new_pages);

        ret = rb_allocate_pages(cpu_buffer, nr_pages);
        if (ret < 0)
                goto fail_free_reader;

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

        rb_head_page_activate(cpu_buffer);

        return cpu_buffer;

 fail_free_reader:
        free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
        kfree(cpu_buffer);
        return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;

        free_buffer_page(cpu_buffer->reader_page);

        rb_head_page_deactivate(cpu_buffer);

        if (head) {
                list_for_each_entry_safe(bpage, tmp, head, list) {
                        list_del_init(&bpage->list);
                        free_buffer_page(bpage);
                }
                bpage = list_entry(head, struct buffer_page, list);
                free_buffer_page(bpage);
        }

        kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu);
#endif

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 * @key: ring buffer reader_lock_key.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
{
        struct ring_buffer *buffer;
        long nr_pages;
        int bsize;
        int cpu;

        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
                         GFP_KERNEL);
        if (!buffer)
                return NULL;

        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;

        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
        buffer->reader_lock_key = key;

        init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
        init_waitqueue_head(&buffer->irq_work.waiters);

        /* need at least two pages */
        if (nr_pages < 2)
                nr_pages = 2;

        /*
         * In case of non-hotplug cpu, if the ring-buffer is allocated
         * in early initcall, it will not be notified of secondary cpus.
         * In that case, we need to allocate for all possible cpus.
         */
#ifdef CONFIG_HOTPLUG_CPU
        cpu_notifier_register_begin();
        cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
        cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
        buffer->cpus = nr_cpu_ids;

        bsize = sizeof(void *) * nr_cpu_ids;
        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
                                  GFP_KERNEL);
        if (!buffer->buffers)
                goto fail_free_cpumask;

        for_each_buffer_cpu(buffer, cpu) {
                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
                if (!buffer->buffers[cpu])
                        goto fail_free_buffers;
        }

#ifdef CONFIG_HOTPLUG_CPU
        buffer->cpu_notify.notifier_call = rb_cpu_notify;
        buffer->cpu_notify.priority = 0;
        __register_cpu_notifier(&buffer->cpu_notify);
        cpu_notifier_register_done();
#endif

        mutex_init(&buffer->mutex);

        return buffer;

 fail_free_buffers:
        for_each_buffer_cpu(buffer, cpu) {
                if (buffer->buffers[cpu])
                        rb_free_cpu_buffer(buffer->buffers[cpu]);
        }
        kfree(buffer->buffers);

 fail_free_cpumask:
        free_cpumask_var(buffer->cpumask);
#ifdef CONFIG_HOTPLUG_CPU
        cpu_notifier_register_done();
#endif

 fail_free_buffer:
        kfree(buffer);
        return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
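
/*
 * Editor's note: hedged usage sketch (not part of the original file).
 * Callers normally go through the ring_buffer_alloc() macro in
 * <linux/ring_buffer.h>, which supplies the lockdep key for
 * __ring_buffer_alloc(); sizes are rounded up to whole pages, two at
 * minimum.
 */
#ifdef RB_EXAMPLE_SKETCHES
static struct ring_buffer *rb_example_setup(void)
{
        /* one megabyte per cpu, overwriting old data when full; */
        /* pair with ring_buffer_free() on teardown */
        return ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
}
#endif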
1376
1377 /**
1378  * ring_buffer_free - free a ring buffer.
1379  * @buffer: the buffer to free.
1380  */
1381 void
1382 ring_buffer_free(struct ring_buffer *buffer)
1383 {
1384         int cpu;
1385
1386 #ifdef CONFIG_HOTPLUG_CPU
1387         cpu_notifier_register_begin();
1388         __unregister_cpu_notifier(&buffer->cpu_notify);
1389 #endif
1390
1391         for_each_buffer_cpu(buffer, cpu)
1392                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1393
1394 #ifdef CONFIG_HOTPLUG_CPU
1395         cpu_notifier_register_done();
1396 #endif
1397
1398         kfree(buffer->buffers);
1399         free_cpumask_var(buffer->cpumask);
1400
1401         kfree(buffer);
1402 }
1403 EXPORT_SYMBOL_GPL(ring_buffer_free);
1404
1405 void ring_buffer_set_clock(struct ring_buffer *buffer,
1406                            u64 (*clock)(void))
1407 {
1408         buffer->clock = clock;
1409 }
1410
1411 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1412
1413 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1414 {
1415         return local_read(&bpage->entries) & RB_WRITE_MASK;
1416 }
1417
1418 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1419 {
1420         return local_read(&bpage->write) & RB_WRITE_MASK;
1421 }
1422
1423 static int
1424 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1425 {
1426         struct list_head *tail_page, *to_remove, *next_page;
1427         struct buffer_page *to_remove_page, *tmp_iter_page;
1428         struct buffer_page *last_page, *first_page;
1429         unsigned long nr_removed;
1430         unsigned long head_bit;
1431         int page_entries;
1432
1433         head_bit = 0;
1434
1435         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1436         atomic_inc(&cpu_buffer->record_disabled);
1437         /*
1438          * We don't race with the readers since we have acquired the reader
1439          * lock. We also don't race with writers after disabling recording.
1440          * This makes it easy to figure out the first and the last page to be
1441          * removed from the list. We unlink all the pages in between including
1442          * the first and last pages. This is done in a busy loop so that we
1443          * lose the least number of traces.
1444          * The pages are freed after we restart recording and unlock readers.
1445          */
1446         tail_page = &cpu_buffer->tail_page->list;
1447
1448         /*
1449          * tail page might be on reader page, we remove the next page
1450          * from the ring buffer
1451          */
1452         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1453                 tail_page = rb_list_head(tail_page->next);
1454         to_remove = tail_page;
1455
1456         /* start of pages to remove */
1457         first_page = list_entry(rb_list_head(to_remove->next),
1458                                 struct buffer_page, list);
1459
1460         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1461                 to_remove = rb_list_head(to_remove)->next;
1462                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1463         }
1464
1465         next_page = rb_list_head(to_remove)->next;
1466
1467         /*
1468          * Now we remove all pages between tail_page and next_page.
1469          * Make sure that we have head_bit value preserved for the
1470          * next page
1471          */
1472         tail_page->next = (struct list_head *)((unsigned long)next_page |
1473                                                 head_bit);
1474         next_page = rb_list_head(next_page);
1475         next_page->prev = tail_page;
1476
1477         /* make sure pages points to a valid page in the ring buffer */
1478         cpu_buffer->pages = next_page;
1479
1480         /* update head page */
1481         if (head_bit)
1482                 cpu_buffer->head_page = list_entry(next_page,
1483                                                 struct buffer_page, list);
1484
1485         /*
1486          * change read pointer to make sure any read iterators reset
1487          * themselves
1488          */
1489         cpu_buffer->read = 0;
1490
1491         /* pages are removed, resume tracing and then free the pages */
1492         atomic_dec(&cpu_buffer->record_disabled);
1493         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1494
1495         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1496
1497         /* last buffer page to remove */
1498         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1499                                 list);
1500         tmp_iter_page = first_page;
1501
1502         do {
1503                 to_remove_page = tmp_iter_page;
1504                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1505
1506                 /* update the counters */
1507                 page_entries = rb_page_entries(to_remove_page);
1508                 if (page_entries) {
1509                         /*
1510                          * If something was added to this page, it was full
1511                          * since it is not the tail page. So we deduct the
1512                          * bytes consumed in ring buffer from here.
1513                          * Increment overrun to account for the lost events.
1514                          */
1515                         local_add(page_entries, &cpu_buffer->overrun);
1516                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1517                 }
1518
1519                 /*
1520                  * We have already removed references to this list item, just
1521                  * free up the buffer_page and its page
1522                  */
1523                 free_buffer_page(to_remove_page);
1524                 nr_removed--;
1525
1526         } while (to_remove_page != last_page);
1527
1528         RB_WARN_ON(cpu_buffer, nr_removed);
1529
1530         return nr_removed == 0;
1531 }
1532
1533 static int
1534 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1535 {
1536         struct list_head *pages = &cpu_buffer->new_pages;
1537         int retries, success;
1538
1539         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1540         /*
1541          * We are holding the reader lock, so the reader page won't be swapped
1542          * in the ring buffer. Now we are racing with the writer trying to
1543          * move head page and the tail page.
1544          * We are going to adapt the reader page update process where:
1545          * 1. We first splice the start and end of list of new pages between
1546          *    the head page and its previous page.
1547          * 2. We cmpxchg the prev_page->next to point from head page to the
1548          *    start of new pages list.
1549          * 3. Finally, we update the head->prev to the end of new list.
1550          *
1551          * We will try this process 10 times, to make sure that we don't keep
1552          * spinning.
1553          */
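        /*
         * An illustrative sketch (an assumed k-page insertion). H is the
         * head page, P the page before it, N1..Nk the new pages, and (H)
         * the pointer carrying the head-page flag:
         *
         *   before:  P --(H)--> H --> ...
         *   after:   P --> N1 --> ... --> Nk --(H)--> H --> ...
         *
         * The cmpxchg() on P->next is what publishes the splice
         * atomically; if it fails, the head moved under us and we retry.
         */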
1554         retries = 10;
1555         success = 0;
1556         while (retries--) {
1557                 struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1558                 struct list_head *head_page, *prev_page, *r;
1559                 struct list_head *last_page, *first_page;
1560                 struct list_head *head_page_with_bit;
1561
1562                 if (!hpage)
1563                         break;
1564                 head_page = &hpage->list;
1565                 prev_page = head_page->prev;
1566                 first_page = pages->next;
1567                 last_page  = pages->prev;
1568
1569                 head_page_with_bit = (struct list_head *)
1570                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1571
1572                 last_page->next = head_page_with_bit;
1573                 first_page->prev = prev_page;
1574
1575                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1576
1577                 if (r == head_page_with_bit) {
1578                         /*
1579                          * yay, we replaced the page pointer to our new list,
1580                          * now we just have to update the head page's prev
1581                          * pointer to point to the end of the list
1582                          */
1583                         head_page->prev = last_page;
1584                         success = 1;
1585                         break;
1586                 }
1587         }
1588
1589         if (success)
1590                 INIT_LIST_HEAD(pages);
1591         /*
1592          * If we weren't successful in adding the new pages, warn and
1593          * stop tracing.
1594          */
1595         RB_WARN_ON(cpu_buffer, !success);
1596         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1597
1598         /* free pages if they weren't inserted */
1599         if (!success) {
1600                 struct buffer_page *bpage, *tmp;
1601                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1602                                          list) {
1603                         list_del_init(&bpage->list);
1604                         free_buffer_page(bpage);
1605                 }
1606         }
1607         return success;
1608 }
1609
1610 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1611 {
1612         int success;
1613
1614         if (cpu_buffer->nr_pages_to_update > 0)
1615                 success = rb_insert_pages(cpu_buffer);
1616         else
1617                 success = rb_remove_pages(cpu_buffer,
1618                                         -cpu_buffer->nr_pages_to_update);
1619
1620         if (success)
1621                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1622 }
1623
1624 static void update_pages_handler(struct work_struct *work)
1625 {
1626         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1627                         struct ring_buffer_per_cpu, update_pages_work);
1628         rb_update_pages(cpu_buffer);
1629         complete(&cpu_buffer->update_done);
1630 }
1631
1632 /**
1633  * ring_buffer_resize - resize the ring buffer
1634  * @buffer: the buffer to resize.
1635  * @size: the new size.
1636  * @cpu_id: the cpu buffer to resize
1637  *
1638  * Minimum size is 2 * BUF_PAGE_SIZE.
1639  *
1640  * Returns the new (page aligned) size on success and < 0 on failure.
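 *
 * An illustrative call (65536 is only an example; sizes are rounded
 * up to whole pages):
 *
 *        ring_buffer_resize(buffer, 65536, RING_BUFFER_ALL_CPUS);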
1641  */
1642 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1643                         int cpu_id)
1644 {
1645         struct ring_buffer_per_cpu *cpu_buffer;
1646         unsigned long nr_pages;
1647         int cpu, err = 0;
1648
1649         /*
1650          * Always succeed at resizing a non-existent buffer:
1651          */
1652         if (!buffer)
1653                 return size;
1654
1655         /* Make sure the requested buffer exists */
1656         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1657             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1658                 return size;
1659
1660         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1661
1662         /* we need a minimum of two pages */
1663         if (nr_pages < 2)
1664                 nr_pages = 2;
1665
1666         size = nr_pages * BUF_PAGE_SIZE;
1667
1668         /*
1669          * Don't succeed if resizing is disabled, as a reader might be
1670          * manipulating the ring buffer and is expecting a sane state while
1671          * this is true.
1672          */
1673         if (atomic_read(&buffer->resize_disabled))
1674                 return -EBUSY;
1675
1676         /* prevent another thread from changing buffer sizes */
1677         mutex_lock(&buffer->mutex);
1678
1679         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1680                 /* calculate the pages to update */
1681                 for_each_buffer_cpu(buffer, cpu) {
1682                         cpu_buffer = buffer->buffers[cpu];
1683
1684                         cpu_buffer->nr_pages_to_update = nr_pages -
1685                                                         cpu_buffer->nr_pages;
1686                         /*
1687                          * nothing more to do when removing pages, or when there is no update
1688                          */
1689                         if (cpu_buffer->nr_pages_to_update <= 0)
1690                                 continue;
1691                         /*
1692                          * to add pages, make sure all new pages can be
1693                          * allocated without receiving ENOMEM
1694                          */
1695                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1696                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1697                                                 &cpu_buffer->new_pages, cpu)) {
1698                                 /* not enough memory for new pages */
1699                                 err = -ENOMEM;
1700                                 goto out_err;
1701                         }
1702                 }
1703
1704                 get_online_cpus();
1705                 /*
1706                  * Fire off all the required work handlers
1707                  * We can't schedule on offline CPUs, but it's not necessary
1708                  * since we can change their buffer sizes without any race.
1709                  */
1710                 for_each_buffer_cpu(buffer, cpu) {
1711                         cpu_buffer = buffer->buffers[cpu];
1712                         if (!cpu_buffer->nr_pages_to_update)
1713                                 continue;
1714
1715                         /* Can't run something on an offline CPU. */
1716                         if (!cpu_online(cpu)) {
1717                                 rb_update_pages(cpu_buffer);
1718                                 cpu_buffer->nr_pages_to_update = 0;
1719                         } else {
1720                                 schedule_work_on(cpu,
1721                                                 &cpu_buffer->update_pages_work);
1722                         }
1723                 }
1724
1725                 /* wait for all the updates to complete */
1726                 for_each_buffer_cpu(buffer, cpu) {
1727                         cpu_buffer = buffer->buffers[cpu];
1728                         if (!cpu_buffer->nr_pages_to_update)
1729                                 continue;
1730
1731                         if (cpu_online(cpu))
1732                                 wait_for_completion(&cpu_buffer->update_done);
1733                         cpu_buffer->nr_pages_to_update = 0;
1734                 }
1735
1736                 put_online_cpus();
1737         } else {
1738                 /* Make sure this CPU has been initialized */
1739                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1740                         goto out;
1741
1742                 cpu_buffer = buffer->buffers[cpu_id];
1743
1744                 if (nr_pages == cpu_buffer->nr_pages)
1745                         goto out;
1746
1747                 cpu_buffer->nr_pages_to_update = nr_pages -
1748                                                 cpu_buffer->nr_pages;
1749
1750                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1751                 if (cpu_buffer->nr_pages_to_update > 0 &&
1752                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1753                                             &cpu_buffer->new_pages, cpu_id)) {
1754                         err = -ENOMEM;
1755                         goto out_err;
1756                 }
1757
1758                 get_online_cpus();
1759
1760                 /* Can't run something on an offline CPU. */
1761                 if (!cpu_online(cpu_id))
1762                         rb_update_pages(cpu_buffer);
1763                 else {
1764                         schedule_work_on(cpu_id,
1765                                          &cpu_buffer->update_pages_work);
1766                         wait_for_completion(&cpu_buffer->update_done);
1767                 }
1768
1769                 cpu_buffer->nr_pages_to_update = 0;
1770                 put_online_cpus();
1771         }
1772
1773  out:
1774         /*
1775          * The ring buffer resize can happen with the ring buffer
1776          * enabled, so that the update disturbs the tracing as little
1777          * as possible. But if the buffer is disabled, we do not need
1778          * to worry about that, and we can take the time to verify
1779          * that the buffer is not corrupt.
1780          */
1781         if (atomic_read(&buffer->record_disabled)) {
1782                 atomic_inc(&buffer->record_disabled);
1783                 /*
1784                  * Even though the buffer was disabled, we must make sure
1785                  * that it is truly disabled before calling rb_check_pages.
1786                  * There could have been a race between checking
1787                  * record_disabled and incrementing it.
1788                  */
1789                 synchronize_sched();
1790                 for_each_buffer_cpu(buffer, cpu) {
1791                         cpu_buffer = buffer->buffers[cpu];
1792                         rb_check_pages(cpu_buffer);
1793                 }
1794                 atomic_dec(&buffer->record_disabled);
1795         }
1796
1797         mutex_unlock(&buffer->mutex);
1798         return size;
1799
1800  out_err:
1801         for_each_buffer_cpu(buffer, cpu) {
1802                 struct buffer_page *bpage, *tmp;
1803
1804                 cpu_buffer = buffer->buffers[cpu];
1805                 cpu_buffer->nr_pages_to_update = 0;
1806
1807                 if (list_empty(&cpu_buffer->new_pages))
1808                         continue;
1809
1810                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1811                                         list) {
1812                         list_del_init(&bpage->list);
1813                         free_buffer_page(bpage);
1814                 }
1815         }
1816         mutex_unlock(&buffer->mutex);
1817         return err;
1818 }
1819 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1820
1821 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1822 {
1823         mutex_lock(&buffer->mutex);
1824         if (val)
1825                 buffer->flags |= RB_FL_OVERWRITE;
1826         else
1827                 buffer->flags &= ~RB_FL_OVERWRITE;
1828         mutex_unlock(&buffer->mutex);
1829 }
1830 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1831
1832 static inline void *
1833 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1834 {
1835         return bpage->data + index;
1836 }
1837
1838 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1839 {
1840         return bpage->page->data + index;
1841 }
1842
1843 static inline struct ring_buffer_event *
1844 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1845 {
1846         return __rb_page_index(cpu_buffer->reader_page,
1847                                cpu_buffer->reader_page->read);
1848 }
1849
1850 static inline struct ring_buffer_event *
1851 rb_iter_head_event(struct ring_buffer_iter *iter)
1852 {
1853         return __rb_page_index(iter->head_page, iter->head);
1854 }
1855
1856 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1857 {
1858         return local_read(&bpage->page->commit);
1859 }
1860
1861 /* Size is determined by what has been committed */
1862 static inline unsigned rb_page_size(struct buffer_page *bpage)
1863 {
1864         return rb_page_commit(bpage);
1865 }
1866
1867 static inline unsigned
1868 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1869 {
1870         return rb_page_commit(cpu_buffer->commit_page);
1871 }
1872
1873 static inline unsigned
1874 rb_event_index(struct ring_buffer_event *event)
1875 {
1876         unsigned long addr = (unsigned long)event;
1877
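        /* page offset of the event minus the buffer page header */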
1878         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1879 }
1880
1881 static void rb_inc_iter(struct ring_buffer_iter *iter)
1882 {
1883         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1884
1885         /*
1886          * The iterator could be on the reader page (it starts there).
1887          * But the head could have moved, since the reader was
1888          * found. Check for this case and assign the iterator
1889          * to the head page instead of next.
1890          */
1891         if (iter->head_page == cpu_buffer->reader_page)
1892                 iter->head_page = rb_set_head_page(cpu_buffer);
1893         else
1894                 rb_inc_page(cpu_buffer, &iter->head_page);
1895
1896         iter->read_stamp = iter->head_page->page->time_stamp;
1897         iter->head = 0;
1898 }
1899
1900 /*
1901  * rb_handle_head_page - writer hit the head page
1902  *
1903  * Returns: +1 to retry page
1904  *           0 to continue
1905  *          -1 on error
1906  */
1907 static int
1908 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1909                     struct buffer_page *tail_page,
1910                     struct buffer_page *next_page)
1911 {
1912         struct buffer_page *new_head;
1913         int entries;
1914         int type;
1915         int ret;
1916
1917         entries = rb_page_entries(next_page);
1918
1919         /*
1920          * The hard part is here. We need to move the head
1921          * forward, and protect against both readers on
1922          * other CPUs and writers coming in via interrupts.
1923          */
1924         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1925                                        RB_PAGE_HEAD);
1926
1927         /*
1928          * type can be one of four:
1929          *  NORMAL - an interrupt already moved it for us
1930          *  HEAD   - we are the first to get here.
1931          *  UPDATE - we are the interrupt interrupting
1932          *           a current move.
1933          *  MOVED  - a reader on another CPU moved the next
1934          *           pointer to its reader page. Give up
1935          *           and try again.
1936          */
1937
1938         switch (type) {
1939         case RB_PAGE_HEAD:
1940                 /*
1941                  * We changed the head to UPDATE, thus
1942                  * it is our responsibility to update
1943                  * the counters.
1944                  */
1945                 local_add(entries, &cpu_buffer->overrun);
1946                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1947
1948                 /*
1949                  * The entries will be zeroed out when we move the
1950                  * tail page.
1951                  */
1952
1953                 /* still more to do */
1954                 break;
1955
1956         case RB_PAGE_UPDATE:
1957                 /*
1958                  * This is an interrupt that interrupted the
1959                  * previous update. Still more to do.
1960                  */
1961                 break;
1962         case RB_PAGE_NORMAL:
1963                 /*
1964                  * An interrupt came in before the update
1965                  * and processed this for us.
1966                  * Nothing left to do.
1967                  */
1968                 return 1;
1969         case RB_PAGE_MOVED:
1970                 /*
1971                  * The reader is on another CPU and just did
1972                  * a swap with our next_page.
1973                  * Try again.
1974                  */
1975                 return 1;
1976         default:
1977                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1978                 return -1;
1979         }
1980
1981         /*
1982          * Now that we are here, the old head pointer is
1983          * set to UPDATE. This will keep the reader from
1984          * swapping the head page with the reader page.
1985          * The reader (on another CPU) will spin till
1986          * we are finished.
1987          *
1988          * We just need to protect against interrupts
1989          * doing the job. We will set the next pointer
1990          * to HEAD. After that, we set the old pointer
1991          * to NORMAL, but only if it was HEAD before;
1992          * otherwise we are an interrupt, and only
1993          * want the outermost commit to reset it.
1994          */
1995         new_head = next_page;
1996         rb_inc_page(cpu_buffer, &new_head);
1997
1998         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1999                                     RB_PAGE_NORMAL);
2000
2001         /*
2002          * Valid returns are:
2003          *  HEAD   - an interrupt came in and already set it.
2004          *  NORMAL - One of two things:
2005          *            1) We really set it.
2006          *            2) A bunch of interrupts came in and moved
2007          *               the page forward again.
2008          */
2009         switch (ret) {
2010         case RB_PAGE_HEAD:
2011         case RB_PAGE_NORMAL:
2012                 /* OK */
2013                 break;
2014         default:
2015                 RB_WARN_ON(cpu_buffer, 1);
2016                 return -1;
2017         }
2018
2019         /*
2020          * It is possible that an interrupt came in,
2021          * set the head up, then more interrupts came in
2022          * and moved it again. When we get back here,
2023          * the page would have been set to NORMAL but we
2024          * just set it back to HEAD.
2025          *
2026          * How do you detect this? Well, if that happened
2027          * the tail page would have moved.
2028          */
2029         if (ret == RB_PAGE_NORMAL) {
2030                 struct buffer_page *buffer_tail_page;
2031
2032                 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2033                 /*
2034                  * If the tail had moved past next, then we need
2035                  * to reset the pointer.
2036                  */
2037                 if (buffer_tail_page != tail_page &&
2038                     buffer_tail_page != next_page)
2039                         rb_head_page_set_normal(cpu_buffer, new_head,
2040                                                 next_page,
2041                                                 RB_PAGE_HEAD);
2042         }
2043
2044         /*
2045          * If this was the outermost commit (the one that
2046          * changed the original pointer from HEAD to UPDATE),
2047          * then it is up to us to reset it to NORMAL.
2048          */
2049         if (type == RB_PAGE_HEAD) {
2050                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2051                                               tail_page,
2052                                               RB_PAGE_UPDATE);
2053                 if (RB_WARN_ON(cpu_buffer,
2054                                ret != RB_PAGE_UPDATE))
2055                         return -1;
2056         }
2057
2058         return 0;
2059 }
2060
2061 static inline void
2062 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2063               unsigned long tail, struct rb_event_info *info)
2064 {
2065         struct buffer_page *tail_page = info->tail_page;
2066         struct ring_buffer_event *event;
2067         unsigned long length = info->length;
2068
2069         /*
2070          * Only the event that crossed the page boundary
2071          * must fill the old tail_page with padding.
2072          */
2073         if (tail >= BUF_PAGE_SIZE) {
2074                 /*
2075                  * If the page was filled, then we still need
2076                  * to update the real_end. Reset it to zero
2077                  * and the reader will ignore it.
2078                  */
2079                 if (tail == BUF_PAGE_SIZE)
2080                         tail_page->real_end = 0;
2081
2082                 local_sub(length, &tail_page->write);
2083                 return;
2084         }
2085
2086         event = __rb_page_index(tail_page, tail);
2087         kmemcheck_annotate_bitfield(event, bitfield);
2088
2089         /* account for padding bytes */
2090         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2091
2092         /*
2093          * Save the original length to the meta data.
2094          * This will be used by the reader to add lost event
2095          * counter.
2096          */
2097         tail_page->real_end = tail;
2098
2099         /*
2100          * If this event is bigger than the minimum size, then
2101          * we need to be careful that we don't subtract the
2102          * write counter enough to allow another writer to slip
2103          * in on this page.
2104          * We put in a discarded commit instead, to make sure
2105          * that this space is not used again.
2106          *
2107          * If we are less than the minimum size, we don't need to
2108          * worry about it.
2109          */
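        /*
         * Illustrative numbers (assumed): if tail leaves 80 bytes on
         * this page, the padding event set up below covers all of them
         * (array[0] = 80 - RB_EVNT_HDR_SIZE payload bytes) and write is
         * pulled back so the page ends exactly at BUF_PAGE_SIZE.
         */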
2110         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2111                 /* No room for any events */
2112
2113                 /* Mark the rest of the page with padding */
2114                 rb_event_set_padding(event);
2115
2116                 /* Set the write back to the previous setting */
2117                 local_sub(length, &tail_page->write);
2118                 return;
2119         }
2120
2121         /* Put in a discarded event */
2122         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2123         event->type_len = RINGBUF_TYPE_PADDING;
2124         /* time delta must be non zero */
2125         event->time_delta = 1;
2126
2127         /* Set write to end of buffer */
2128         length = (tail + length) - BUF_PAGE_SIZE;
2129         local_sub(length, &tail_page->write);
2130 }
2131
2132 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2133
2134 /*
2135  * This is the slow path, force gcc not to inline it.
2136  */
2137 static noinline struct ring_buffer_event *
2138 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2139              unsigned long tail, struct rb_event_info *info)
2140 {
2141         struct buffer_page *tail_page = info->tail_page;
2142         struct buffer_page *commit_page = cpu_buffer->commit_page;
2143         struct ring_buffer *buffer = cpu_buffer->buffer;
2144         struct buffer_page *next_page;
2145         int ret;
2146
2147         next_page = tail_page;
2148
2149         rb_inc_page(cpu_buffer, &next_page);
2150
2151         /*
2152          * If for some reason, we had an interrupt storm that made
2153          * it all the way around the buffer, bail, and warn
2154          * about it.
2155          */
2156         if (unlikely(next_page == commit_page)) {
2157                 local_inc(&cpu_buffer->commit_overrun);
2158                 goto out_reset;
2159         }
2160
2161         /*
2162          * This is where the fun begins!
2163          *
2164          * We are fighting against races between a reader that
2165          * could be on another CPU trying to swap its reader
2166          * page with the buffer head.
2167          *
2168          * We are also fighting against interrupts coming in and
2169          * moving the head or tail on us as well.
2170          *
2171          * If the next page is the head page then we have filled
2172          * the buffer, unless the commit page is still on the
2173          * reader page.
2174          */
2175         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2176
2177                 /*
2178                  * If the commit is not on the reader page, then
2179                  * move the head page.
2180                  */
2181                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2182                         /*
2183                          * If we are not in overwrite mode,
2184                          * this is easy, just stop here.
2185                          */
2186                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2187                                 local_inc(&cpu_buffer->dropped_events);
2188                                 goto out_reset;
2189                         }
2190
2191                         ret = rb_handle_head_page(cpu_buffer,
2192                                                   tail_page,
2193                                                   next_page);
2194                         if (ret < 0)
2195                                 goto out_reset;
2196                         if (ret)
2197                                 goto out_again;
2198                 } else {
2199                         /*
2200                          * We need to be careful here too. The
2201                          * commit page could still be on the reader
2202                          * page. We could have a small buffer, and
2203                          * have filled up the buffer with events
2204                          * from interrupts and such, and wrapped.
2205                          *
2206                  * Note, if the tail page is also on the
2207                          * reader_page, we let it move out.
2208                          */
2209                         if (unlikely((cpu_buffer->commit_page !=
2210                                       cpu_buffer->tail_page) &&
2211                                      (cpu_buffer->commit_page ==
2212                                       cpu_buffer->reader_page))) {
2213                                 local_inc(&cpu_buffer->commit_overrun);
2214                                 goto out_reset;
2215                         }
2216                 }
2217         }
2218
2219         rb_tail_page_update(cpu_buffer, tail_page, next_page);
2220
2221  out_again:
2222
2223         rb_reset_tail(cpu_buffer, tail, info);
2224
2225         /* Commit what we have for now. */
2226         rb_end_commit(cpu_buffer);
2227         /* rb_end_commit() decs committing */
2228         local_inc(&cpu_buffer->committing);
2229
2230         /* fail and let the caller try again */
2231         return ERR_PTR(-EAGAIN);
2232
2233  out_reset:
2234         /* reset write */
2235         rb_reset_tail(cpu_buffer, tail, info);
2236
2237         return NULL;
2238 }
2239
2240 /* Slow path, do not inline */
2241 static noinline struct ring_buffer_event *
2242 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2243 {
2244         event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2245
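        /*
         * The delta is split across the event: the low TS_SHIFT bits
         * go into the 27-bit time_delta field, the remaining high bits
         * into array[0], extending the delta to 59 bits in total.
         */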
2246         /* Not the first event on the page? */
2247         if (rb_event_index(event)) {
2248                 event->time_delta = delta & TS_MASK;
2249                 event->array[0] = delta >> TS_SHIFT;
2250         } else {
2251                 /* nope, just zero it */
2252                 event->time_delta = 0;
2253                 event->array[0] = 0;
2254         }
2255
2256         return skip_time_extend(event);
2257 }
2258
2259 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2260                                      struct ring_buffer_event *event);
2261
2262 /**
2263  * rb_update_event - update event type and data
2264  * @cpu_buffer: the per cpu buffer the event belongs to
2265  * @event: the event to update
2266  * @info: the reservation info, holding the length and time delta
2267  *
2268  * Update the type and data fields of the event. The length
2269  * is the actual size that is written to the ring buffer,
2270  * and with this, we can determine what to place into the
2271  * data field.
2272  */
2273 static void
2274 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2275                 struct ring_buffer_event *event,
2276                 struct rb_event_info *info)
2277 {
2278         unsigned length = info->length;
2279         u64 delta = info->delta;
2280
2281         /* Only a commit updates the timestamp */
2282         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2283                 delta = 0;
2284
2285         /*
2286          * If we need to add a timestamp, then we
2287          * add it to the start of the reserved space.
2288          */
2289         if (unlikely(info->add_timestamp)) {
2290                 event = rb_add_time_stamp(event, delta);
2291                 length -= RB_LEN_TIME_EXTEND;
2292                 delta = 0;
2293         }
2294
2295         event->time_delta = delta;
2296         length -= RB_EVNT_HDR_SIZE;
2297         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2298                 event->type_len = 0;
2299                 event->array[0] = length;
2300         } else
2301                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2302 }
2303
2304 static unsigned rb_calculate_event_length(unsigned length)
2305 {
2306         struct ring_buffer_event event; /* Used only for sizeof array */
2307
2308         /* zero length can cause confusion */
2309         if (!length)
2310                 length++;
2311
2312         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2313                 length += sizeof(event.array[0]);
2314
2315         length += RB_EVNT_HDR_SIZE;
2316         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2317
2318         /*
2319          * In case the time delta is larger than the 27 bits for it
2320          * in the header, we need to add a timestamp. If another
2321          * event comes in when trying to discard this one to increase
2322          * the length, then the timestamp will be added in the allocated
2323          * space of this event. If length is bigger than the size needed
2324          * for the TIME_EXTEND, then padding has to be used. The events
2325          * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2326          * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2327          * As length is a multiple of 4, we only need to worry if it
2328          * is 12 (RB_LEN_TIME_EXTEND + 4).
2329          */
2330         if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2331                 length += RB_ALIGNMENT;
2332
2333         return length;
2334 }
2335
2336 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2337 static inline bool sched_clock_stable(void)
2338 {
2339         return true;
2340 }
2341 #endif
2342
2343 static inline int
2344 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2345                   struct ring_buffer_event *event)
2346 {
2347         unsigned long new_index, old_index;
2348         struct buffer_page *bpage;
2349         unsigned long index;
2350         unsigned long addr;
2351
2352         new_index = rb_event_index(event);
2353         old_index = new_index + rb_event_ts_length(event);
2354         addr = (unsigned long)event;
2355         addr &= PAGE_MASK;
2356
2357         bpage = READ_ONCE(cpu_buffer->tail_page);
2358
2359         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2360                 unsigned long write_mask =
2361                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2362                 unsigned long event_length = rb_event_length(event);
2363                 /*
2364                  * This is on the tail page. It is possible that
2365                  * a write could come in and move the tail page
2366                  * and write to the next page. That is fine
2367                  * because we just shorten what is on this page.
2368                  */
2369                 old_index += write_mask;
2370                 new_index += write_mask;
2371                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2372                 if (index == old_index) {
2373                         /* update counters */
2374                         local_sub(event_length, &cpu_buffer->entries_bytes);
2375                         return 1;
2376                 }
2377         }
2378
2379         /* could not discard */
2380         return 0;
2381 }
2382
2383 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2384 {
2385         local_inc(&cpu_buffer->committing);
2386         local_inc(&cpu_buffer->commits);
2387 }
2388
2389 static void
2390 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2391 {
2392         unsigned long max_count;
2393
2394         /*
2395          * We only race with interrupts and NMIs on this CPU.
2396          * If we own the commit event, then we can commit
2397          * all others that interrupted us, since the interruptions
2398          * are in stack format (they finish before they come
2399          * back to us). This allows us to do a simple loop to
2400          * assign the commit to the tail.
2401          */
2402  again:
2403         max_count = cpu_buffer->nr_pages * 100;
2404
2405         while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2406                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2407                         return;
2408                 if (RB_WARN_ON(cpu_buffer,
2409                                rb_is_reader_page(cpu_buffer->tail_page)))
2410                         return;
2411                 local_set(&cpu_buffer->commit_page->page->commit,
2412                           rb_page_write(cpu_buffer->commit_page));
2413                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2414                 /* Only update the write stamp if the page has an event */
2415                 if (rb_page_write(cpu_buffer->commit_page))
2416                         cpu_buffer->write_stamp =
2417                                 cpu_buffer->commit_page->page->time_stamp;
2418                 /* add barrier to keep gcc from optimizing too much */
2419                 barrier();
2420         }
2421         while (rb_commit_index(cpu_buffer) !=
2422                rb_page_write(cpu_buffer->commit_page)) {
2423
2424                 local_set(&cpu_buffer->commit_page->page->commit,
2425                           rb_page_write(cpu_buffer->commit_page));
2426                 RB_WARN_ON(cpu_buffer,
2427                            local_read(&cpu_buffer->commit_page->page->commit) &
2428                            ~RB_WRITE_MASK);
2429                 barrier();
2430         }
2431
2432         /* again, keep gcc from optimizing */
2433         barrier();
2434
2435         /*
2436          * If an interrupt came in just after the first while loop
2437          * and pushed the tail page forward, we will be left with
2438          * a dangling commit that will never go forward.
2439          */
2440         if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2441                 goto again;
2442 }
2443
2444 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2445 {
2446         unsigned long commits;
2447
2448         if (RB_WARN_ON(cpu_buffer,
2449                        !local_read(&cpu_buffer->committing)))
2450                 return;
2451
2452  again:
2453         commits = local_read(&cpu_buffer->commits);
2454         /* synchronize with interrupts */
2455         barrier();
2456         if (local_read(&cpu_buffer->committing) == 1)
2457                 rb_set_commit_to_write(cpu_buffer);
2458
2459         local_dec(&cpu_buffer->committing);
2460
2461         /* synchronize with interrupts */
2462         barrier();
2463
2464         /*
2465          * Need to account for interrupts coming in between the
2466          * updating of the commit page and the clearing of the
2467          * committing counter.
2468          */
2469         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2470             !local_read(&cpu_buffer->committing)) {
2471                 local_inc(&cpu_buffer->committing);
2472                 goto again;
2473         }
2474 }
2475
2476 static inline void rb_event_discard(struct ring_buffer_event *event)
2477 {
2478         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2479                 event = skip_time_extend(event);
2480
2481         /* array[0] holds the actual length for the discarded event */
2482         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2483         event->type_len = RINGBUF_TYPE_PADDING;
2484         /* time delta must be non zero */
2485         if (!event->time_delta)
2486                 event->time_delta = 1;
2487 }
2488
2489 static inline bool
2490 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2491                    struct ring_buffer_event *event)
2492 {
2493         unsigned long addr = (unsigned long)event;
2494         unsigned long index;
2495
2496         index = rb_event_index(event);
2497         addr &= PAGE_MASK;
2498
2499         return cpu_buffer->commit_page->page == (void *)addr &&
2500                 rb_commit_index(cpu_buffer) == index;
2501 }
2502
2503 static void
2504 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2505                       struct ring_buffer_event *event)
2506 {
2507         u64 delta;
2508
2509         /*
2510          * The event first in the commit queue updates the
2511          * time stamp.
2512          */
2513         if (rb_event_is_commit(cpu_buffer, event)) {
2514                 /*
2515                  * A commit event that is first on a page
2516                  * updates the write timestamp with the page stamp
2517                  */
2518                 if (!rb_event_index(event))
2519                         cpu_buffer->write_stamp =
2520                                 cpu_buffer->commit_page->page->time_stamp;
2521                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2522                         delta = event->array[0];
2523                         delta <<= TS_SHIFT;
2524                         delta += event->time_delta;
2525                         cpu_buffer->write_stamp += delta;
2526                 } else
2527                         cpu_buffer->write_stamp += event->time_delta;
2528         }
2529 }
2530
2531 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2532                       struct ring_buffer_event *event)
2533 {
2534         local_inc(&cpu_buffer->entries);
2535         rb_update_write_stamp(cpu_buffer, event);
2536         rb_end_commit(cpu_buffer);
2537 }
2538
2539 static __always_inline void
2540 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2541 {
2542         bool pagebusy;
2543
2544         if (buffer->irq_work.waiters_pending) {
2545                 buffer->irq_work.waiters_pending = false;
2546                 /* irq_work_queue() supplies its own memory barriers */
2547                 irq_work_queue(&buffer->irq_work.work);
2548         }
2549
2550         if (cpu_buffer->irq_work.waiters_pending) {
2551                 cpu_buffer->irq_work.waiters_pending = false;
2552                 /* irq_work_queue() supplies its own memory barriers */
2553                 irq_work_queue(&cpu_buffer->irq_work.work);
2554         }
2555
2556         pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2557
2558         if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2559                 cpu_buffer->irq_work.wakeup_full = true;
2560                 cpu_buffer->irq_work.full_waiters_pending = false;
2561                 /* irq_work_queue() supplies its own memory barriers */
2562                 irq_work_queue(&cpu_buffer->irq_work.work);
2563         }
2564 }
2565
2566 /*
2567  * The lock and unlock are done within a preempt disable section.
2568  * The current_context per_cpu variable can only be modified
2569  * by the current task between lock and unlock. But it can
2570  * be modified more than once via an interrupt. To pass this
2571  * information from the lock to the unlock without having to
2572  * access the 'in_interrupt()' functions again (which do show
2573  * a bit of overhead in something as critical as function tracing),
2574  * we use a bitmask trick.
2575  *
2576  *  bit 0 =  NMI context
2577  *  bit 1 =  IRQ context
2578  *  bit 2 =  SoftIRQ context
2579  *  bit 3 =  normal context.
2580  *
2581  * This works because this is the order of contexts that can
2582  * preempt other contexts. A SoftIRQ never preempts an IRQ
2583  * context.
2584  *
2585  * When the context is determined, the corresponding bit is
2586  * checked and set (if it was set, then a recursion of that context
2587  * happened).
2588  *
2589  * On unlock, we need to clear this bit. To do so, just subtract
2590  * 1 from the current_context and AND it to itself.
2591  *
2592  * (binary)
2593  *  101 - 1 = 100
2594  *  101 & 100 = 100 (clearing bit zero)
2595  *
2596  *  1010 - 1 = 1001
2597  *  1010 & 1001 = 1000 (clearing bit 1)
2598  *
2599  * The least significant bit can be cleared this way, and it
2600  * just so happens that it is the same bit corresponding to
2601  * the current context.
2602  */
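/*
 * (The unlock step below is the classic "x &= x - 1" trick for
 * clearing the lowest set bit.)
 */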
2603
2604 static __always_inline int
2605 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2606 {
2607         unsigned int val = cpu_buffer->current_context;
2608         int bit;
2609
2610         if (in_interrupt()) {
2611                 if (in_nmi())
2612                         bit = RB_CTX_NMI;
2613                 else if (in_irq())
2614                         bit = RB_CTX_IRQ;
2615                 else
2616                         bit = RB_CTX_SOFTIRQ;
2617         } else
2618                 bit = RB_CTX_NORMAL;
2619
2620         if (unlikely(val & (1 << bit)))
2621                 return 1;
2622
2623         val |= (1 << bit);
2624         cpu_buffer->current_context = val;
2625
2626         return 0;
2627 }
2628
2629 static __always_inline void
2630 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2631 {
2632         cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2633 }
2634
2635 /**
2636  * ring_buffer_unlock_commit - commit a reserved event
2637  * @buffer: The buffer to commit to
2638  * @event: The event pointer to commit.
2639  *
2640  * This commits the data to the ring buffer, and releases any locks held.
2641  *
2642  * Must be paired with ring_buffer_lock_reserve.
2643  */
2644 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2645                               struct ring_buffer_event *event)
2646 {
2647         struct ring_buffer_per_cpu *cpu_buffer;
2648         int cpu = raw_smp_processor_id();
2649
2650         cpu_buffer = buffer->buffers[cpu];
2651
2652         rb_commit(cpu_buffer, event);
2653
2654         rb_wakeups(buffer, cpu_buffer);
2655
2656         trace_recursive_unlock(cpu_buffer);
2657
2658         preempt_enable_notrace();
2659
2660         return 0;
2661 }
2662 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2663
2664 static noinline void
2665 rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2666                     struct rb_event_info *info)
2667 {
2668         WARN_ONCE(info->delta > (1ULL << 59),
2669                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2670                   (unsigned long long)info->delta,
2671                   (unsigned long long)info->ts,
2672                   (unsigned long long)cpu_buffer->write_stamp,
2673                   sched_clock_stable() ? "" :
2674                   "If you just came from a suspend/resume,\n"
2675                   "please switch to the trace global clock:\n"
2676                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2677         info->add_timestamp = 1;
2678 }
2679
2680 static struct ring_buffer_event *
2681 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2682                   struct rb_event_info *info)
2683 {
2684         struct ring_buffer_event *event;
2685         struct buffer_page *tail_page;
2686         unsigned long tail, write;
2687
2688         /*
2689          * If the time delta since the last event is too big to
2690          * hold in the time field of the event, then we append a
2691          * TIME EXTEND event ahead of the data event.
2692          */
2693         if (unlikely(info->add_timestamp))
2694                 info->length += RB_LEN_TIME_EXTEND;
2695
2696         /* Don't let the compiler play games with cpu_buffer->tail_page */
2697         tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2698         write = local_add_return(info->length, &tail_page->write);
2699
2700         /* keep only the write index portion of the counter */
2701         write &= RB_WRITE_MASK;
2702         tail = write - info->length;
2703
2704         /*
2705          * If this is the first commit on the page, then it has the same
2706          * timestamp as the page itself.
2707          */
2708         if (!tail)
2709                 info->delta = 0;
2710
2711         /* See if we shot past the end of this buffer page */
2712         if (unlikely(write > BUF_PAGE_SIZE))
2713                 return rb_move_tail(cpu_buffer, tail, info);
2714
2715         /* We reserved something on the buffer */
2716
2717         event = __rb_page_index(tail_page, tail);
2718         kmemcheck_annotate_bitfield(event, bitfield);
2719         rb_update_event(cpu_buffer, event, info);
2720
2721         local_inc(&tail_page->entries);
2722
2723         /*
2724          * If this is the first commit on the page, then update
2725          * its timestamp.
2726          */
2727         if (!tail)
2728                 tail_page->page->time_stamp = info->ts;
2729
2730         /* account for these added bytes */
2731         local_add(info->length, &cpu_buffer->entries_bytes);
2732
2733         return event;
2734 }
2735
2736 static struct ring_buffer_event *
2737 rb_reserve_next_event(struct ring_buffer *buffer,
2738                       struct ring_buffer_per_cpu *cpu_buffer,
2739                       unsigned long length)
2740 {
2741         struct ring_buffer_event *event;
2742         struct rb_event_info info;
2743         int nr_loops = 0;
2744         u64 diff;
2745
2746         rb_start_commit(cpu_buffer);
2747
2748 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2749         /*
2750          * Due to the ability to swap a cpu buffer from a buffer
2751          * it is possible it was swapped before we committed.
2752          * (committing stops a swap). We check for it here and
2753          * if it happened, we have to fail the write.
2754          */
2755         barrier();
2756         if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2757                 local_dec(&cpu_buffer->committing);
2758                 local_dec(&cpu_buffer->commits);
2759                 return NULL;
2760         }
2761 #endif
2762
2763         info.length = rb_calculate_event_length(length);
2764  again:
2765         info.add_timestamp = 0;
2766         info.delta = 0;
2767
2768         /*
2769          * We allow for interrupts to reenter here and do a trace.
2770          * If one does, it will cause this original code to loop
2771          * back here. Even with heavy interrupts happening, this
2772          * should only happen a few times in a row. If this happens
2773          * 1000 times in a row, there must be either an interrupt
2774          * storm or we have something buggy.
2775          * Bail!
2776          */
2777         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2778                 goto out_fail;
2779
2780         info.ts = rb_time_stamp(cpu_buffer->buffer);
2781         diff = info.ts - cpu_buffer->write_stamp;
2782
2783         /* make sure this diff is calculated here */
2784         barrier();
2785
2786         /* Did the write stamp get updated already? */
2787         if (likely(info.ts >= cpu_buffer->write_stamp)) {
2788                 info.delta = diff;
2789                 if (unlikely(test_time_stamp(info.delta)))
2790                         rb_handle_timestamp(cpu_buffer, &info);
2791         }
2792
2793         event = __rb_reserve_next(cpu_buffer, &info);
2794
2795         if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2796                 if (info.add_timestamp)
2797                         info.length -= RB_LEN_TIME_EXTEND;
2798                 goto again;
2799         }
2800
2801         if (!event)
2802                 goto out_fail;
2803
2804         return event;
2805
2806  out_fail:
2807         rb_end_commit(cpu_buffer);
2808         return NULL;
2809 }
2810
2811 /**
2812  * ring_buffer_lock_reserve - reserve a part of the buffer
2813  * @buffer: the ring buffer to reserve from
2814  * @length: the length of the data to reserve (excluding event header)
2815  *
2816  * Returns a reserved event on the ring buffer to copy directly to.
2817  * The user of this interface will need to get the body to write into
2818  * and can use the ring_buffer_event_data() interface.
2819  *
2820  * The length is the length of the data needed, not the event length
2821  * which also includes the event header.
2822  *
2823  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2824  * If NULL is returned, then nothing has been allocated or locked.
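 *
 * An illustrative sketch, where "my_event" stands in for some
 * caller-side struct:
 *
 *        event = ring_buffer_lock_reserve(buffer, sizeof(my_event));
 *        if (event) {
 *                memcpy(ring_buffer_event_data(event), &my_event,
 *                       sizeof(my_event));
 *                ring_buffer_unlock_commit(buffer, event);
 *        }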
2825  */
2826 struct ring_buffer_event *
2827 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2828 {
2829         struct ring_buffer_per_cpu *cpu_buffer;
2830         struct ring_buffer_event *event;
2831         int cpu;
2832
2833         /* If we are tracing schedule, we don't want to recurse */
2834         preempt_disable_notrace();
2835
2836         if (unlikely(atomic_read(&buffer->record_disabled)))
2837                 goto out;
2838
2839         cpu = raw_smp_processor_id();
2840
2841         if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2842                 goto out;
2843
2844         cpu_buffer = buffer->buffers[cpu];
2845
2846         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2847                 goto out;
2848
2849         if (unlikely(length > BUF_MAX_DATA_SIZE))
2850                 goto out;
2851
2852         if (unlikely(trace_recursive_lock(cpu_buffer)))
2853                 goto out;
2854
2855         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2856         if (!event)
2857                 goto out_unlock;
2858
2859         return event;
2860
2861  out_unlock:
2862         trace_recursive_unlock(cpu_buffer);
2863  out:
2864         preempt_enable_notrace();
2865         return NULL;
2866 }
2867 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2868
2869 /*
2870  * Decrement the entry counter of the page that an event is on.
2871  * The event does not even need to exist, only the pointer
2872  * to the page it is on. This may only be called before the commit
2873  * takes place.
2874  */
2875 static inline void
2876 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2877                    struct ring_buffer_event *event)
2878 {
2879         unsigned long addr = (unsigned long)event;
2880         struct buffer_page *bpage = cpu_buffer->commit_page;
2881         struct buffer_page *start;
2882
2883         addr &= PAGE_MASK;
2884
2885         /* Do the likely case first */
2886         if (likely(bpage->page == (void *)addr)) {
2887                 local_dec(&bpage->entries);
2888                 return;
2889         }
2890
2891         /*
2892          * Because the commit page may be on the reader page we
2893          * start with the next page and check the end loop there.
2894          */
2895         rb_inc_page(cpu_buffer, &bpage);
2896         start = bpage;
2897         do {
2898                 if (bpage->page == (void *)addr) {
2899                         local_dec(&bpage->entries);
2900                         return;
2901                 }
2902                 rb_inc_page(cpu_buffer, &bpage);
2903         } while (bpage != start);
2904
2905         /* commit not part of this buffer?? */
2906         RB_WARN_ON(cpu_buffer, 1);
2907 }
2908
2909 /**
2910  * ring_buffer_commit_discard - discard an event that has not been committed
2911  * @buffer: the ring buffer
2912  * @event: non committed event to discard
2913  *
2914  * Sometimes an event that is in the ring buffer needs to be ignored.
2915  * This function lets the user discard an event in the ring buffer
2916  * and then that event will not be read later.
2917  *
2918  * This function only works if it is called before the item has been
2919  * committed. It will try to free the event from the ring buffer
2920  * if another event has not been added behind it.
2921  *
2922  * If another event has been added behind it, it will set the event
2923  * up as discarded, and perform the commit.
2924  *
2925  * If this function is called, do not call ring_buffer_unlock_commit on
2926  * the event.
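 *
 * An illustrative pattern, where event_passes_filter() stands in for
 * some hypothetical caller-side predicate:
 *
 *        event = ring_buffer_lock_reserve(buffer, length);
 *        (fill in the event data)
 *        if (event_passes_filter(event))
 *                ring_buffer_unlock_commit(buffer, event);
 *        else
 *                ring_buffer_discard_commit(buffer, event);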
2927  */
2928 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2929                                 struct ring_buffer_event *event)
2930 {
2931         struct ring_buffer_per_cpu *cpu_buffer;
2932         int cpu;
2933
2934         /* The event is discarded regardless */
2935         rb_event_discard(event);
2936
2937         cpu = smp_processor_id();
2938         cpu_buffer = buffer->buffers[cpu];
2939
2940         /*
2941          * This must only be called if the event has not been
2942          * committed yet. Thus we can assume that preemption
2943          * is still disabled.
2944          */
2945         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2946
2947         rb_decrement_entry(cpu_buffer, event);
2948         if (rb_try_to_discard(cpu_buffer, event))
2949                 goto out;
2950
2951         /*
2952          * The commit is still visible by the reader, so we
2953          * must still update the timestamp.
2954          */
2955         rb_update_write_stamp(cpu_buffer, event);
2956  out:
2957         rb_end_commit(cpu_buffer);
2958
2959         trace_recursive_unlock(cpu_buffer);
2960
2961         preempt_enable_notrace();
2962
2963 }
2964 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2965
2966 /**
2967  * ring_buffer_write - write data to the buffer without reserving
2968  * @buffer: The ring buffer to write to.
2969  * @length: The length of the data being written (excluding the event header)
2970  * @data: The data to write to the buffer.
2971  *
2972  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2973  * one function. If you already have the data to write to the buffer, it
2974  * may be easier to simply call this function.
2975  *
2976  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2977  * and not the length of the event which would hold the header.
2978  */
2979 int ring_buffer_write(struct ring_buffer *buffer,
2980                       unsigned long length,
2981                       void *data)
2982 {
2983         struct ring_buffer_per_cpu *cpu_buffer;
2984         struct ring_buffer_event *event;
2985         void *body;
2986         int ret = -EBUSY;
2987         int cpu;
2988
2989         preempt_disable_notrace();
2990
2991         if (atomic_read(&buffer->record_disabled))
2992                 goto out;
2993
2994         cpu = raw_smp_processor_id();
2995
2996         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2997                 goto out;
2998
2999         cpu_buffer = buffer->buffers[cpu];
3000
3001         if (atomic_read(&cpu_buffer->record_disabled))
3002                 goto out;
3003
3004         if (length > BUF_MAX_DATA_SIZE)
3005                 goto out;
3006
3007         if (unlikely(trace_recursive_lock(cpu_buffer)))
3008                 goto out;
3009
3010         event = rb_reserve_next_event(buffer, cpu_buffer, length);
3011         if (!event)
3012                 goto out_unlock;
3013
3014         body = rb_event_data(event);
3015
3016         memcpy(body, data, length);
3017
3018         rb_commit(cpu_buffer, event);
3019
3020         rb_wakeups(buffer, cpu_buffer);
3021
3022         ret = 0;
3023
3024  out_unlock:
3025         trace_recursive_unlock(cpu_buffer);
3026
3027  out:
3028         preempt_enable_notrace();
3029
3030         return ret;
3031 }
3032 EXPORT_SYMBOL_GPL(ring_buffer_write);
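A minimal sketch of a caller (struct my_record and the use of trace_clock_local() are illustrative assumptions): since ring_buffer_write() does the reserve, copy and commit in one call, logging a fixed-size record reduces to:

struct my_record {			/* hypothetical payload layout */
	u64	ts;
	u32	value;
};

static int example_log_value(struct ring_buffer *buffer, u32 value)
{
	struct my_record rec = {
		.ts	= trace_clock_local(),
		.value	= value,
	};

	/* length is the payload only; the event header is added internally */
	return ring_buffer_write(buffer, sizeof(rec), &rec);
}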
3033
3034 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3035 {
3036         struct buffer_page *reader = cpu_buffer->reader_page;
3037         struct buffer_page *head = rb_set_head_page(cpu_buffer);
3038         struct buffer_page *commit = cpu_buffer->commit_page;
3039
3040         /* In case of error, head will be NULL */
3041         if (unlikely(!head))
3042                 return true;
3043
3044         return reader->read == rb_page_commit(reader) &&
3045                 (commit == reader ||
3046                  (commit == head &&
3047                   head->read == rb_page_commit(commit)));
3048 }
3049
3050 /**
3051  * ring_buffer_record_disable - stop all writes into the buffer
3052  * @buffer: The ring buffer to stop writes to.
3053  *
3054  * This prevents all writes to the buffer. Any attempt to write
3055  * to the buffer after this will fail and return NULL.
3056  *
3057  * The caller should call synchronize_sched() after this.
3058  */
3059 void ring_buffer_record_disable(struct ring_buffer *buffer)
3060 {
3061         atomic_inc(&buffer->record_disabled);
3062 }
3063 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3064
3065 /**
3066  * ring_buffer_record_enable - enable writes to the buffer
3067  * @buffer: The ring buffer to enable writes
3068  *
3069  * Note, multiple disables will need the same number of enables
3070  * to truly enable the writing (much like preempt_disable).
3071  */
3072 void ring_buffer_record_enable(struct ring_buffer *buffer)
3073 {
3074         atomic_dec(&buffer->record_disabled);
3075 }
3076 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
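A sketch of the intended pairing (illustrative, not a path in this file): disable, wait for in-flight writers with synchronize_sched() as the comment above suggests, do the work, then balance with exactly one enable per disable:

static void example_quiesce(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* let writers already past the check finish */

	/* ... inspect or tear down the per cpu buffers here ... */

	ring_buffer_record_enable(buffer);	/* balances the disable above */
}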
3077
3078 /**
3079  * ring_buffer_record_off - stop all writes into the buffer
3080  * @buffer: The ring buffer to stop writes to.
3081  *
3082  * This prevents all writes to the buffer. Any attempt to write
3083  * to the buffer after this will fail and return NULL.
3084  *
3085  * This is different than ring_buffer_record_disable() as
3086  * it works like an on/off switch, whereas the disable() version
3087  * must be paired with an enable().
3088  */
3089 void ring_buffer_record_off(struct ring_buffer *buffer)
3090 {
3091         unsigned int rd;
3092         unsigned int new_rd;
3093
3094         do {
3095                 rd = atomic_read(&buffer->record_disabled);
3096                 new_rd = rd | RB_BUFFER_OFF;
3097         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3098 }
3099 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3100
3101 /**
3102  * ring_buffer_record_on - restart writes into the buffer
3103  * @buffer: The ring buffer to start writes to.
3104  *
3105  * This enables all writes to the buffer that was disabled by
3106  * ring_buffer_record_off().
3107  *
3108  * This is different than ring_buffer_record_enable() as
3109  * it works like an on/off switch, whereas the enable() version
3110  * must be paired with a disable().
3111  */
3112 void ring_buffer_record_on(struct ring_buffer *buffer)
3113 {
3114         unsigned int rd;
3115         unsigned int new_rd;
3116
3117         do {
3118                 rd = atomic_read(&buffer->record_disabled);
3119                 new_rd = rd & ~RB_BUFFER_OFF;
3120         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3121 }
3122 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
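To contrast the two interfaces (a sketch; it assumes no nested record_disable() counts are held, since ring_buffer_record_is_on() reports the combined state), the off/on pair is a switch, not a counter:

static void example_switch(struct ring_buffer *buffer)
{
	ring_buffer_record_off(buffer);
	ring_buffer_record_off(buffer);	/* redundant: the switch is already off */

	ring_buffer_record_on(buffer);	/* a single on() restores writing */
	WARN_ON(!ring_buffer_record_is_on(buffer));
}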
3123
3124 /**
3125  * ring_buffer_record_is_on - return true if the ring buffer can write
3126  * @buffer: The ring buffer to see if write is enabled
3127  *
3128  * Returns true if the ring buffer is in a state that it accepts writes.
3129  */
3130 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3131 {
3132         return !atomic_read(&buffer->record_disabled);
3133 }
3134
3135 /**
3136  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3137  * @buffer: The ring buffer to stop writes to.
3138  * @cpu: The CPU buffer to stop
3139  *
3140  * This prevents all writes to the buffer. Any attempt to write
3141  * to the buffer after this will fail and return NULL.
3142  *
3143  * The caller should call synchronize_sched() after this.
3144  */
3145 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3146 {
3147         struct ring_buffer_per_cpu *cpu_buffer;
3148
3149         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3150                 return;
3151
3152         cpu_buffer = buffer->buffers[cpu];
3153         atomic_inc(&cpu_buffer->record_disabled);
3154 }
3155 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3156
3157 /**
3158  * ring_buffer_record_enable_cpu - enable writes to the buffer
3159  * @buffer: The ring buffer to enable writes
3160  * @cpu: The CPU to enable.
3161  *
3162  * Note, multiple disables will need the same number of enables
3163  * to truly enable the writing (much like preempt_disable).
3164  */
3165 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3166 {
3167         struct ring_buffer_per_cpu *cpu_buffer;
3168
3169         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3170                 return;
3171
3172         cpu_buffer = buffer->buffers[cpu];
3173         atomic_dec(&cpu_buffer->record_disabled);
3174 }
3175 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3176
3177 /*
3178  * The total entries in the ring buffer is the running counter
3179  * of entries entered into the ring buffer, minus the sum of
3180  * the entries read from the ring buffer and the number of
3181  * entries that were overwritten.
3182  */
3183 static inline unsigned long
3184 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3185 {
3186         return local_read(&cpu_buffer->entries) -
3187                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3188 }
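As a concrete check of the arithmetic: a cpu buffer whose writer has inserted 1000 events, of which 200 were overwritten on wrap and 300 have been consumed by readers, reports 1000 - (200 + 300) = 500 entries still outstanding.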
3189
3190 /**
3191  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3192  * @buffer: The ring buffer
3193  * @cpu: The per CPU buffer to read from.
3194  */
3195 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3196 {
3197         unsigned long flags;
3198         struct ring_buffer_per_cpu *cpu_buffer;
3199         struct buffer_page *bpage;
3200         u64 ret = 0;
3201
3202         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3203                 return 0;
3204
3205         cpu_buffer = buffer->buffers[cpu];
3206         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3207         /*
3208          * if the tail is on reader_page, oldest time stamp is on the reader
3209          * page
3210          */
3211         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3212                 bpage = cpu_buffer->reader_page;
3213         else
3214                 bpage = rb_set_head_page(cpu_buffer);
3215         if (bpage)
3216                 ret = bpage->page->time_stamp;
3217         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3218
3219         return ret;
3220 }
3221 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3222
3223 /**
3224  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3225  * @buffer: The ring buffer
3226  * @cpu: The per CPU buffer to read from.
3227  */
3228 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3229 {
3230         struct ring_buffer_per_cpu *cpu_buffer;
3231         unsigned long ret;
3232
3233         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3234                 return 0;
3235
3236         cpu_buffer = buffer->buffers[cpu];
3237         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3238
3239         return ret;
3240 }
3241 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3242
3243 /**
3244  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3245  * @buffer: The ring buffer
3246  * @cpu: The per CPU buffer to get the entries from.
3247  */
3248 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3249 {
3250         struct ring_buffer_per_cpu *cpu_buffer;
3251
3252         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3253                 return 0;
3254
3255         cpu_buffer = buffer->buffers[cpu];
3256
3257         return rb_num_of_entries(cpu_buffer);
3258 }
3259 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3260
3261 /**
3262  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3263  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3264  * @buffer: The ring buffer
3265  * @cpu: The per CPU buffer to get the number of overruns from
3266  */
3267 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3268 {
3269         struct ring_buffer_per_cpu *cpu_buffer;
3270         unsigned long ret;
3271
3272         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3273                 return 0;
3274
3275         cpu_buffer = buffer->buffers[cpu];
3276         ret = local_read(&cpu_buffer->overrun);
3277
3278         return ret;
3279 }
3280 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3281
3282 /**
3283  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3284  * commits failing due to the buffer wrapping around while there are uncommitted
3285  * events, such as during an interrupt storm.
3286  * @buffer: The ring buffer
3287  * @cpu: The per CPU buffer to get the number of overruns from
3288  */
3289 unsigned long
3290 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3291 {
3292         struct ring_buffer_per_cpu *cpu_buffer;
3293         unsigned long ret;
3294
3295         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3296                 return 0;
3297
3298         cpu_buffer = buffer->buffers[cpu];
3299         ret = local_read(&cpu_buffer->commit_overrun);
3300
3301         return ret;
3302 }
3303 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3304
3305 /**
3306  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3307  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3308  * @buffer: The ring buffer
3309  * @cpu: The per CPU buffer to get the number of dropped events from
3310  */
3311 unsigned long
3312 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3313 {
3314         struct ring_buffer_per_cpu *cpu_buffer;
3315         unsigned long ret;
3316
3317         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3318                 return 0;
3319
3320         cpu_buffer = buffer->buffers[cpu];
3321         ret = local_read(&cpu_buffer->dropped_events);
3322
3323         return ret;
3324 }
3325 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3326
3327 /**
3328  * ring_buffer_read_events_cpu - get the number of events successfully read
3329  * @buffer: The ring buffer
3330  * @cpu: The per CPU buffer to get the number of events read from
3331  */
3332 unsigned long
3333 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3334 {
3335         struct ring_buffer_per_cpu *cpu_buffer;
3336
3337         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3338                 return 0;
3339
3340         cpu_buffer = buffer->buffers[cpu];
3341         return cpu_buffer->read;
3342 }
3343 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3344
3345 /**
3346  * ring_buffer_entries - get the number of entries in a buffer
3347  * @buffer: The ring buffer
3348  *
3349  * Returns the total number of entries in the ring buffer
3350  * (all CPU entries)
3351  */
3352 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3353 {
3354         struct ring_buffer_per_cpu *cpu_buffer;
3355         unsigned long entries = 0;
3356         int cpu;
3357
3358         /* if you care about this being correct, lock the buffer */
3359         for_each_buffer_cpu(buffer, cpu) {
3360                 cpu_buffer = buffer->buffers[cpu];
3361                 entries += rb_num_of_entries(cpu_buffer);
3362         }
3363
3364         return entries;
3365 }
3366 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3367
3368 /**
3369  * ring_buffer_overruns - get the number of overruns in buffer
3370  * @buffer: The ring buffer
3371  *
3372  * Returns the total number of overruns in the ring buffer
3373  * (all CPU entries)
3374  */
3375 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3376 {
3377         struct ring_buffer_per_cpu *cpu_buffer;
3378         unsigned long overruns = 0;
3379         int cpu;
3380
3381         /* if you care about this being correct, lock the buffer */
3382         for_each_buffer_cpu(buffer, cpu) {
3383                 cpu_buffer = buffer->buffers[cpu];
3384                 overruns += local_read(&cpu_buffer->overrun);
3385         }
3386
3387         return overruns;
3388 }
3389 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
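Putting the per cpu accessors together (an illustrative dump; it leans on the fact that each accessor safely returns 0 for a cpu without a buffer):

static void example_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: entries=%lu overrun=%lu dropped=%lu read=%lu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_dropped_events_cpu(buffer, cpu),
			ring_buffer_read_events_cpu(buffer, cpu));
}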
3390
3391 static void rb_iter_reset(struct ring_buffer_iter *iter)
3392 {
3393         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3394
3395         /* Iterator usage is expected to have record disabled */
3396         iter->head_page = cpu_buffer->reader_page;
3397         iter->head = cpu_buffer->reader_page->read;
3398
3399         iter->cache_reader_page = iter->head_page;
3400         iter->cache_read = cpu_buffer->read;
3401
3402         if (iter->head)
3403                 iter->read_stamp = cpu_buffer->read_stamp;
3404         else
3405                 iter->read_stamp = iter->head_page->page->time_stamp;
3406 }
3407
3408 /**
3409  * ring_buffer_iter_reset - reset an iterator
3410  * @iter: The iterator to reset
3411  *
3412  * Resets the iterator, so that it will start from the beginning
3413  * again.
3414  */
3415 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3416 {
3417         struct ring_buffer_per_cpu *cpu_buffer;
3418         unsigned long flags;
3419
3420         if (!iter)
3421                 return;
3422
3423         cpu_buffer = iter->cpu_buffer;
3424
3425         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3426         rb_iter_reset(iter);
3427         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3428 }
3429 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3430
3431 /**
3432  * ring_buffer_iter_empty - check if an iterator has no more to read
3433  * @iter: The iterator to check
3434  */
3435 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3436 {
3437         struct ring_buffer_per_cpu *cpu_buffer;
3438
3439         cpu_buffer = iter->cpu_buffer;
3440
3441         return iter->head_page == cpu_buffer->commit_page &&
3442                 iter->head == rb_commit_index(cpu_buffer);
3443 }
3444 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3445
3446 static void
3447 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3448                      struct ring_buffer_event *event)
3449 {
3450         u64 delta;
3451
3452         switch (event->type_len) {
3453         case RINGBUF_TYPE_PADDING:
3454                 return;
3455
3456         case RINGBUF_TYPE_TIME_EXTEND:
3457                 delta = event->array[0];
3458                 delta <<= TS_SHIFT;
3459                 delta += event->time_delta;
3460                 cpu_buffer->read_stamp += delta;
3461                 return;
3462
3463         case RINGBUF_TYPE_TIME_STAMP:
3464                 /* FIXME: not implemented */
3465                 return;
3466
3467         case RINGBUF_TYPE_DATA:
3468                 cpu_buffer->read_stamp += event->time_delta;
3469                 return;
3470
3471         default:
3472                 BUG();
3473         }
3474         return;
3475 }
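To make the encoding concrete (the entry header at the top of this file gives time_delta 27 bits, so TS_SHIFT is 27): a time extend with array[0] == 2 and time_delta == 3 reconstructs to delta = (2 << 27) + 3 = 268435459 clock units, which is then added to the running read_stamp.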
3476
3477 static void
3478 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3479                           struct ring_buffer_event *event)
3480 {
3481         u64 delta;
3482
3483         switch (event->type_len) {
3484         case RINGBUF_TYPE_PADDING:
3485                 return;
3486
3487         case RINGBUF_TYPE_TIME_EXTEND:
3488                 delta = event->array[0];
3489                 delta <<= TS_SHIFT;
3490                 delta += event->time_delta;
3491                 iter->read_stamp += delta;
3492                 return;
3493
3494         case RINGBUF_TYPE_TIME_STAMP:
3495                 /* FIXME: not implemented */
3496                 return;
3497
3498         case RINGBUF_TYPE_DATA:
3499                 iter->read_stamp += event->time_delta;
3500                 return;
3501
3502         default:
3503                 BUG();
3504         }
3505         return;
3506 }
3507
3508 static struct buffer_page *
3509 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3510 {
3511         struct buffer_page *reader = NULL;
3512         unsigned long overwrite;
3513         unsigned long flags;
3514         int nr_loops = 0;
3515         int ret;
3516
3517         local_irq_save(flags);
3518         arch_spin_lock(&cpu_buffer->lock);
3519
3520  again:
3521         /*
3522          * This should normally only loop twice. But because the
3523          * start of the reader inserts an empty page, it causes
3524          * a case where we will loop three times. There should be no
3525          * reason to loop four times (that I know of).
3526          */
3527         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3528                 reader = NULL;
3529                 goto out;
3530         }
3531
3532         reader = cpu_buffer->reader_page;
3533
3534         /* If there's more to read, return this page */
3535         if (cpu_buffer->reader_page->read < rb_page_size(reader))
3536                 goto out;
3537
3538         /* Never should we have an index greater than the size */
3539         if (RB_WARN_ON(cpu_buffer,
3540                        cpu_buffer->reader_page->read > rb_page_size(reader)))
3541                 goto out;
3542
3543         /* check if we caught up to the tail */
3544         reader = NULL;
3545         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3546                 goto out;
3547
3548         /* Don't bother swapping if the ring buffer is empty */
3549         if (rb_num_of_entries(cpu_buffer) == 0)
3550                 goto out;
3551
3552         /*
3553          * Reset the reader page to size zero.
3554          */
3555         local_set(&cpu_buffer->reader_page->write, 0);
3556         local_set(&cpu_buffer->reader_page->entries, 0);
3557         local_set(&cpu_buffer->reader_page->page->commit, 0);
3558         cpu_buffer->reader_page->real_end = 0;
3559
3560  spin:
3561         /*
3562          * Splice the empty reader page into the list around the head.
3563          */
3564         reader = rb_set_head_page(cpu_buffer);
3565         if (!reader)
3566                 goto out;
3567         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3568         cpu_buffer->reader_page->list.prev = reader->list.prev;
3569
3570         /*
3571          * cpu_buffer->pages just needs to point to the buffer, it
3572          *  has no specific buffer page to point to. Let's move it out
3573          *  of our way so we don't accidentally swap it.
3574          */
3575         cpu_buffer->pages = reader->list.prev;
3576
3577         /* The reader page will be pointing to the new head */
3578         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3579
3580         /*
3581          * We want to make sure we read the overruns after we set up our
3582          * pointers to the next object. The writer side does a
3583          * cmpxchg to cross pages which acts as the mb on the writer
3584          * side. Note, the reader will constantly fail the swap
3585          * while the writer is updating the pointers, so this
3586          * guarantees that the overwrite recorded here is the one we
3587          * want to compare with the last_overrun.
3588          */
3589         smp_mb();
3590         overwrite = local_read(&(cpu_buffer->overrun));
3591
3592         /*
3593          * Here's the tricky part.
3594          *
3595          * We need to move the pointer past the header page.
3596          * But we can only do that if a writer is not currently
3597          * moving it. The page before the header page has the
3598          * flag bit '1' set if it is pointing to the page we want,
3599          * but if the writer is in the process of moving it
3600          * then it will be '2', or '0' if it has already moved.
3601          */
3602
3603         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3604
3605         /*
3606          * If we did not convert it, then we must try again.
3607          */
3608         if (!ret)
3609                 goto spin;
3610
3611         /*
3612          * Yeah! We succeeded in replacing the page.
3613          *
3614          * Now make the new head point back to the reader page.
3615          */
3616         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3617         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3618
3619         /* Finally update the reader page to the new head */
3620         cpu_buffer->reader_page = reader;
3621         cpu_buffer->reader_page->read = 0;
3622
3623         if (overwrite != cpu_buffer->last_overrun) {
3624                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3625                 cpu_buffer->last_overrun = overwrite;
3626         }
3627
3628         goto again;
3629
3630  out:
3631         /* Update the read_stamp on the first event */
3632         if (reader && reader->read == 0)
3633                 cpu_buffer->read_stamp = reader->page->time_stamp;
3634
3635         arch_spin_unlock(&cpu_buffer->lock);
3636         local_irq_restore(flags);
3637
3638         return reader;
3639 }
3640
3641 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3642 {
3643         struct ring_buffer_event *event;
3644         struct buffer_page *reader;
3645         unsigned length;
3646
3647         reader = rb_get_reader_page(cpu_buffer);
3648
3649         /* This function should not be called when buffer is empty */
3650         if (RB_WARN_ON(cpu_buffer, !reader))
3651                 return;
3652
3653         event = rb_reader_event(cpu_buffer);
3654
3655         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3656                 cpu_buffer->read++;
3657
3658         rb_update_read_stamp(cpu_buffer, event);
3659
3660         length = rb_event_length(event);
3661         cpu_buffer->reader_page->read += length;
3662 }
3663
3664 static void rb_advance_iter(struct ring_buffer_iter *iter)
3665 {
3666         struct ring_buffer_per_cpu *cpu_buffer;
3667         struct ring_buffer_event *event;
3668         unsigned length;
3669
3670         cpu_buffer = iter->cpu_buffer;
3671
3672         /*
3673          * Check if we are at the end of the buffer.
3674          */
3675         if (iter->head >= rb_page_size(iter->head_page)) {
3676                 /* discarded commits can make the page empty */
3677                 if (iter->head_page == cpu_buffer->commit_page)
3678                         return;
3679                 rb_inc_iter(iter);
3680                 return;
3681         }
3682
3683         event = rb_iter_head_event(iter);
3684
3685         length = rb_event_length(event);
3686
3687         /*
3688          * This should not be called to advance the header if we are
3689          * at the tail of the buffer.
3690          */
3691         if (RB_WARN_ON(cpu_buffer,
3692                        (iter->head_page == cpu_buffer->commit_page) &&
3693                        (iter->head + length > rb_commit_index(cpu_buffer))))
3694                 return;
3695
3696         rb_update_iter_read_stamp(iter, event);
3697
3698         iter->head += length;
3699
3700         /* check for end of page padding */
3701         if ((iter->head >= rb_page_size(iter->head_page)) &&
3702             (iter->head_page != cpu_buffer->commit_page))
3703                 rb_inc_iter(iter);
3704 }
3705
3706 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3707 {
3708         return cpu_buffer->lost_events;
3709 }
3710
3711 static struct ring_buffer_event *
3712 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3713                unsigned long *lost_events)
3714 {
3715         struct ring_buffer_event *event;
3716         struct buffer_page *reader;
3717         int nr_loops = 0;
3718
3719  again:
3720         /*
3721          * We repeat when a time extend is encountered.
3722          * Since the time extend is always attached to a data event,
3723          * we should never loop more than once.
3724          * (We never hit the following condition more than twice).
3725          */
3726         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3727                 return NULL;
3728
3729         reader = rb_get_reader_page(cpu_buffer);
3730         if (!reader)
3731                 return NULL;
3732
3733         event = rb_reader_event(cpu_buffer);
3734
3735         switch (event->type_len) {
3736         case RINGBUF_TYPE_PADDING:
3737                 if (rb_null_event(event))
3738                         RB_WARN_ON(cpu_buffer, 1);
3739                 /*
3740                  * Because the writer could be discarding every
3741                  * event it creates (which would probably be bad)
3742                  * if we were to go back to "again" then we may never
3743                  * catch up, and will trigger the warn on, or lock
3744                  * the box. Return the padding, and we will release
3745                  * the current locks, and try again.
3746                  */
3747                 return event;
3748
3749         case RINGBUF_TYPE_TIME_EXTEND:
3750                 /* Internal data, OK to advance */
3751                 rb_advance_reader(cpu_buffer);
3752                 goto again;
3753
3754         case RINGBUF_TYPE_TIME_STAMP:
3755                 /* FIXME: not implemented */
3756                 rb_advance_reader(cpu_buffer);
3757                 goto again;
3758
3759         case RINGBUF_TYPE_DATA:
3760                 if (ts) {
3761                         *ts = cpu_buffer->read_stamp + event->time_delta;
3762                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3763                                                          cpu_buffer->cpu, ts);
3764                 }
3765                 if (lost_events)
3766                         *lost_events = rb_lost_events(cpu_buffer);
3767                 return event;
3768
3769         default:
3770                 BUG();
3771         }
3772
3773         return NULL;
3774 }
3775 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3776
3777 static struct ring_buffer_event *
3778 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3779 {
3780         struct ring_buffer *buffer;
3781         struct ring_buffer_per_cpu *cpu_buffer;
3782         struct ring_buffer_event *event;
3783         int nr_loops = 0;
3784
3785         cpu_buffer = iter->cpu_buffer;
3786         buffer = cpu_buffer->buffer;
3787
3788         /*
3789          * Check if someone performed a consuming read to
3790          * the buffer. A consuming read invalidates the iterator
3791          * and we need to reset the iterator in this case.
3792          */
3793         if (unlikely(iter->cache_read != cpu_buffer->read ||
3794                      iter->cache_reader_page != cpu_buffer->reader_page))
3795                 rb_iter_reset(iter);
3796
3797  again:
3798         if (ring_buffer_iter_empty(iter))
3799                 return NULL;
3800
3801         /*
3802          * We repeat when a time extend is encountered or we hit
3803          * the end of the page. Since the time extend is always attached
3804          * to a data event, we should never loop more than three times.
3805          * Once for going to next page, once on time extend, and
3806          * finally once to get the event.
3807          * (We never hit the following condition more than thrice).
3808          */
3809         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3810                 return NULL;
3811
3812         if (rb_per_cpu_empty(cpu_buffer))
3813                 return NULL;
3814
3815         if (iter->head >= rb_page_size(iter->head_page)) {
3816                 rb_inc_iter(iter);
3817                 goto again;
3818         }
3819
3820         event = rb_iter_head_event(iter);
3821
3822         switch (event->type_len) {
3823         case RINGBUF_TYPE_PADDING:
3824                 if (rb_null_event(event)) {
3825                         rb_inc_iter(iter);
3826                         goto again;
3827                 }
3828                 rb_advance_iter(iter);
3829                 return event;
3830
3831         case RINGBUF_TYPE_TIME_EXTEND:
3832                 /* Internal data, OK to advance */
3833                 rb_advance_iter(iter);
3834                 goto again;
3835
3836         case RINGBUF_TYPE_TIME_STAMP:
3837                 /* FIXME: not implemented */
3838                 rb_advance_iter(iter);
3839                 goto again;
3840
3841         case RINGBUF_TYPE_DATA:
3842                 if (ts) {
3843                         *ts = iter->read_stamp + event->time_delta;
3844                         ring_buffer_normalize_time_stamp(buffer,
3845                                                          cpu_buffer->cpu, ts);
3846                 }
3847                 return event;
3848
3849         default:
3850                 BUG();
3851         }
3852
3853         return NULL;
3854 }
3855 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3856
3857 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3858 {
3859         if (likely(!in_nmi())) {
3860                 raw_spin_lock(&cpu_buffer->reader_lock);
3861                 return true;
3862         }
3863
3864         /*
3865          * If an NMI die dumps out the content of the ring buffer,
3866          * trylock must be used to prevent a deadlock if the NMI
3867          * preempted a task that holds the ring buffer locks. If
3868          * we get the lock then all is fine, if not, then continue
3869          * to do the read, but this can corrupt the ring buffer,
3870          * so it must be permanently disabled from future writes.
3871          * Reading from NMI is a one-shot deal.
3872          */
3873         if (raw_spin_trylock(&cpu_buffer->reader_lock))
3874                 return true;
3875
3876         /* Continue without locking, but disable the ring buffer */
3877         atomic_inc(&cpu_buffer->record_disabled);
3878         return false;
3879 }
3880
3881 static inline void
3882 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3883 {
3884         if (likely(locked))
3885                 raw_spin_unlock(&cpu_buffer->reader_lock);
3886         return;
3887 }
3888
3889 /**
3890  * ring_buffer_peek - peek at the next event to be read
3891  * @buffer: The ring buffer to read
3892  * @cpu: The cpu to peek at
3893  * @ts: The timestamp counter of this event.
3894  * @lost_events: a variable to store if events were lost (may be NULL)
3895  *
3896  * This will return the event that will be read next, but does
3897  * not consume the data.
3898  */
3899 struct ring_buffer_event *
3900 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3901                  unsigned long *lost_events)
3902 {
3903         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3904         struct ring_buffer_event *event;
3905         unsigned long flags;
3906         bool dolock;
3907
3908         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909                 return NULL;
3910
3911  again:
3912         local_irq_save(flags);
3913         dolock = rb_reader_lock(cpu_buffer);
3914         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3915         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3916                 rb_advance_reader(cpu_buffer);
3917         rb_reader_unlock(cpu_buffer, dolock);
3918         local_irq_restore(flags);
3919
3920         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3921                 goto again;
3922
3923         return event;
3924 }
3925
3926 /**
3927  * ring_buffer_iter_peek - peek at the next event to be read
3928  * @iter: The ring buffer iterator
3929  * @ts: The timestamp counter of this event.
3930  *
3931  * This will return the event that will be read next, but does
3932  * not increment the iterator.
3933  */
3934 struct ring_buffer_event *
3935 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3936 {
3937         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3938         struct ring_buffer_event *event;
3939         unsigned long flags;
3940
3941  again:
3942         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3943         event = rb_iter_peek(iter, ts);
3944         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3945
3946         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3947                 goto again;
3948
3949         return event;
3950 }
3951
3952 /**
3953  * ring_buffer_consume - return an event and consume it
3954  * @buffer: The ring buffer to get the next event from
3955  * @cpu: the cpu to read the buffer from
3956  * @ts: a variable to store the timestamp (may be NULL)
3957  * @lost_events: a variable to store if events were lost (may be NULL)
3958  *
3959  * Returns the next event in the ring buffer, and that event is consumed.
3960  * Meaning, that sequential reads will keep returning a different event,
3961  * and eventually empty the ring buffer if the producer is slower.
3962  */
3963 struct ring_buffer_event *
3964 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3965                     unsigned long *lost_events)
3966 {
3967         struct ring_buffer_per_cpu *cpu_buffer;
3968         struct ring_buffer_event *event = NULL;
3969         unsigned long flags;
3970         bool dolock;
3971
3972  again:
3973         /* might be called in atomic */
3974         preempt_disable();
3975
3976         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3977                 goto out;
3978
3979         cpu_buffer = buffer->buffers[cpu];
3980         local_irq_save(flags);
3981         dolock = rb_reader_lock(cpu_buffer);
3982
3983         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3984         if (event) {
3985                 cpu_buffer->lost_events = 0;
3986                 rb_advance_reader(cpu_buffer);
3987         }
3988
3989         rb_reader_unlock(cpu_buffer, dolock);
3990         local_irq_restore(flags);
3991
3992  out:
3993         preempt_enable();
3994
3995         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3996                 goto again;
3997
3998         return event;
3999 }
4000 EXPORT_SYMBOL_GPL(ring_buffer_consume);
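A consuming-read loop then looks like this (a sketch; process_event() stands in for whatever the caller does with the payload):

static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			pr_warn("cpu%d: %lu events were overwritten\n",
				cpu, lost);
		process_event(ring_buffer_event_data(event), ts);
	}
}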
4001
4002 /**
4003  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4004  * @buffer: The ring buffer to read from
4005  * @cpu: The cpu buffer to iterate over
4006  *
4007  * This performs the initial preparations necessary to iterate
4008  * through the buffer.  Memory is allocated, buffer recording
4009  * is disabled, and the iterator pointer is returned to the caller.
4010  *
4011  * Disabling buffer recording prevents the reading from being
4012  * corrupted. This is not a consuming read, so a producer is not
4013  * expected.
4014  *
4015  * After a sequence of ring_buffer_read_prepare calls, the user is
4016  * expected to make at least one call to ring_buffer_read_prepare_sync.
4017  * Afterwards, ring_buffer_read_start is invoked to get things going
4018  * for real.
4019  *
4020  * This overall must be paired with ring_buffer_read_finish.
4021  */
4022 struct ring_buffer_iter *
4023 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4024 {
4025         struct ring_buffer_per_cpu *cpu_buffer;
4026         struct ring_buffer_iter *iter;
4027
4028         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4029                 return NULL;
4030
4031         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4032         if (!iter)
4033                 return NULL;
4034
4035         cpu_buffer = buffer->buffers[cpu];
4036
4037         iter->cpu_buffer = cpu_buffer;
4038
4039         atomic_inc(&buffer->resize_disabled);
4040         atomic_inc(&cpu_buffer->record_disabled);
4041
4042         return iter;
4043 }
4044 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4045
4046 /**
4047  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4048  *
4049  * All previously invoked ring_buffer_read_prepare calls to prepare
4050  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4051  * calls on those iterators are allowed.
4052  */
4053 void
4054 ring_buffer_read_prepare_sync(void)
4055 {
4056         synchronize_sched();
4057 }
4058 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4059
4060 /**
4061  * ring_buffer_read_start - start a non consuming read of the buffer
4062  * @iter: The iterator returned by ring_buffer_read_prepare
4063  *
4064  * This finalizes the startup of an iteration through the buffer.
4065  * The iterator comes from a call to ring_buffer_read_prepare and
4066  * an intervening ring_buffer_read_prepare_sync must have been
4067  * performed.
4068  *
4069  * Must be paired with ring_buffer_read_finish.
4070  */
4071 void
4072 ring_buffer_read_start(struct ring_buffer_iter *iter)
4073 {
4074         struct ring_buffer_per_cpu *cpu_buffer;
4075         unsigned long flags;
4076
4077         if (!iter)
4078                 return;
4079
4080         cpu_buffer = iter->cpu_buffer;
4081
4082         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4083         arch_spin_lock(&cpu_buffer->lock);
4084         rb_iter_reset(iter);
4085         arch_spin_unlock(&cpu_buffer->lock);
4086         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4087 }
4088 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4089
4090 /**
4091  * ring_buffer_read_finish - finish reading the iterator of the buffer
4092  * @iter: The iterator retrieved by ring_buffer_read_prepare
4093  *
4094  * This re-enables the recording to the buffer, and frees the
4095  * iterator.
4096  */
4097 void
4098 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4099 {
4100         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4101         unsigned long flags;
4102
4103         /*
4104          * Ring buffer is disabled from recording, here's a good place
4105          * to check the integrity of the ring buffer.
4106          * Must prevent readers from trying to read, as the check
4107          * clears the HEAD page and readers require it.
4108          */
4109         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4110         rb_check_pages(cpu_buffer);
4111         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4112
4113         atomic_dec(&cpu_buffer->record_disabled);
4114         atomic_dec(&cpu_buffer->buffer->resize_disabled);
4115         kfree(iter);
4116 }
4117 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4118
4119 /**
4120  * ring_buffer_read - read the next item in the ring buffer by the iterator
4121  * @iter: The ring buffer iterator
4122  * @ts: The time stamp of the event read.
4123  *
4124  * This reads the next event in the ring buffer and increments the iterator.
4125  */
4126 struct ring_buffer_event *
4127 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4128 {
4129         struct ring_buffer_event *event;
4130         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4131         unsigned long flags;
4132
4133         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4134  again:
4135         event = rb_iter_peek(iter, ts);
4136         if (!event)
4137                 goto out;
4138
4139         if (event->type_len == RINGBUF_TYPE_PADDING)
4140                 goto again;
4141
4142         rb_advance_iter(iter);
4143  out:
4144         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4145
4146         return event;
4147 }
4148 EXPORT_SYMBOL_GPL(ring_buffer_read);
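The full non-consuming lifecycle, end to end (a sketch; inspect_event() is an assumed callback):

static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();	/* required before read_start */
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		inspect_event(ring_buffer_event_data(event), ts);

	ring_buffer_read_finish(iter);		/* re-enables recording */
}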
4149
4150 /**
4151  * ring_buffer_size - return the size of the ring buffer (in bytes)
4152  * @buffer: The ring buffer.
 * @cpu: The per CPU buffer to get the size of.
4153  */
4154 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4155 {
4156         /*
4157          * Earlier, this method returned
4158          *      BUF_PAGE_SIZE * buffer->nr_pages
4159          * Since the nr_pages field is now removed, we have converted this to
4160          * return the per cpu buffer value.
4161          */
4162         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4163                 return 0;
4164
4165         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4166 }
4167 EXPORT_SYMBOL_GPL(ring_buffer_size);
4168
4169 static void
4170 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4171 {
4172         rb_head_page_deactivate(cpu_buffer);
4173
4174         cpu_buffer->head_page
4175                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4176         local_set(&cpu_buffer->head_page->write, 0);
4177         local_set(&cpu_buffer->head_page->entries, 0);
4178         local_set(&cpu_buffer->head_page->page->commit, 0);
4179
4180         cpu_buffer->head_page->read = 0;
4181
4182         cpu_buffer->tail_page = cpu_buffer->head_page;
4183         cpu_buffer->commit_page = cpu_buffer->head_page;
4184
4185         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4186         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4187         local_set(&cpu_buffer->reader_page->write, 0);
4188         local_set(&cpu_buffer->reader_page->entries, 0);
4189         local_set(&cpu_buffer->reader_page->page->commit, 0);
4190         cpu_buffer->reader_page->read = 0;
4191
4192         local_set(&cpu_buffer->entries_bytes, 0);
4193         local_set(&cpu_buffer->overrun, 0);
4194         local_set(&cpu_buffer->commit_overrun, 0);
4195         local_set(&cpu_buffer->dropped_events, 0);
4196         local_set(&cpu_buffer->entries, 0);
4197         local_set(&cpu_buffer->committing, 0);
4198         local_set(&cpu_buffer->commits, 0);
4199         cpu_buffer->read = 0;
4200         cpu_buffer->read_bytes = 0;
4201
4202         cpu_buffer->write_stamp = 0;
4203         cpu_buffer->read_stamp = 0;
4204
4205         cpu_buffer->lost_events = 0;
4206         cpu_buffer->last_overrun = 0;
4207
4208         rb_head_page_activate(cpu_buffer);
4209 }
4210
4211 /**
4212  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4213  * @buffer: The ring buffer to reset a per cpu buffer of
4214  * @cpu: The CPU buffer to be reset
4215  */
4216 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4217 {
4218         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4219         unsigned long flags;
4220
4221         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4222                 return;
4223
4224         atomic_inc(&buffer->resize_disabled);
4225         atomic_inc(&cpu_buffer->record_disabled);
4226
4227         /* Make sure all commits have finished */
4228         synchronize_sched();
4229
4230         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4231
4232         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4233                 goto out;
4234
4235         arch_spin_lock(&cpu_buffer->lock);
4236
4237         rb_reset_cpu(cpu_buffer);
4238
4239         arch_spin_unlock(&cpu_buffer->lock);
4240
4241  out:
4242         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4243
4244         atomic_dec(&cpu_buffer->record_disabled);
4245         atomic_dec(&buffer->resize_disabled);
4246 }
4247 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4248
4249 /**
4250  * ring_buffer_reset - reset a ring buffer
4251  * @buffer: The ring buffer to reset all cpu buffers
4252  */
4253 void ring_buffer_reset(struct ring_buffer *buffer)
4254 {
4255         int cpu;
4256
4257         for_each_buffer_cpu(buffer, cpu)
4258                 ring_buffer_reset_cpu(buffer, cpu);
4259 }
4260 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4261
4262 /**
4263  * ring_buffer_empty - is the ring buffer empty?
4264  * @buffer: The ring buffer to test
4265  */
4266 bool ring_buffer_empty(struct ring_buffer *buffer)
4267 {
4268         struct ring_buffer_per_cpu *cpu_buffer;
4269         unsigned long flags;
4270         bool dolock;
4271         int cpu;
4272         bool ret;
4273
4274         /* yes this is racy, but if you don't like the race, lock the buffer */
4275         for_each_buffer_cpu(buffer, cpu) {
4276                 cpu_buffer = buffer->buffers[cpu];
4277                 local_irq_save(flags);
4278                 dolock = rb_reader_lock(cpu_buffer);
4279                 ret = rb_per_cpu_empty(cpu_buffer);
4280                 rb_reader_unlock(cpu_buffer, dolock);
4281                 local_irq_restore(flags);
4282
4283                 if (!ret)
4284                         return false;
4285         }
4286
4287         return true;
4288 }
4289 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4290
4291 /**
4292  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4293  * @buffer: The ring buffer
4294  * @cpu: The CPU buffer to test
4295  */
4296 bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4297 {
4298         struct ring_buffer_per_cpu *cpu_buffer;
4299         unsigned long flags;
4300         bool dolock;
4301         bool ret;
4302
4303         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4304                 return true;
4305
4306         cpu_buffer = buffer->buffers[cpu];
4307         local_irq_save(flags);
4308         dolock = rb_reader_lock(cpu_buffer);
4309         ret = rb_per_cpu_empty(cpu_buffer);
4310         rb_reader_unlock(cpu_buffer, dolock);
4311         local_irq_restore(flags);
4312
4313         return ret;
4314 }
4315 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4316
4317 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4318 /**
4319  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4320  * @buffer_a: One buffer to swap with
4321  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
4322  *
4323  * This function is useful for tracers that want to take a "snapshot"
4324  * of a CPU buffer and have another backup buffer lying around.
4325  * It is expected that the tracer handles the cpu buffer not being
4326  * used at the moment.
4327  */
4328 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4329                          struct ring_buffer *buffer_b, int cpu)
4330 {
4331         struct ring_buffer_per_cpu *cpu_buffer_a;
4332         struct ring_buffer_per_cpu *cpu_buffer_b;
4333         int ret = -EINVAL;
4334
4335         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4336             !cpumask_test_cpu(cpu, buffer_b->cpumask))
4337                 goto out;
4338
4339         cpu_buffer_a = buffer_a->buffers[cpu];
4340         cpu_buffer_b = buffer_b->buffers[cpu];
4341
4342         /* At least make sure the two buffers are somewhat the same */
4343         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4344                 goto out;
4345
4346         ret = -EAGAIN;
4347
4348         if (atomic_read(&buffer_a->record_disabled))
4349                 goto out;
4350
4351         if (atomic_read(&buffer_b->record_disabled))
4352                 goto out;
4353
4354         if (atomic_read(&cpu_buffer_a->record_disabled))
4355                 goto out;
4356
4357         if (atomic_read(&cpu_buffer_b->record_disabled))
4358                 goto out;
4359
4360         /*
4361          * We can't do a synchronize_sched here because this
4362          * function can be called in atomic context.
4363          * Normally this will be called from the same CPU as cpu.
4364          * If not it's up to the caller to protect this.
4365          */
4366         atomic_inc(&cpu_buffer_a->record_disabled);
4367         atomic_inc(&cpu_buffer_b->record_disabled);
4368
4369         ret = -EBUSY;
4370         if (local_read(&cpu_buffer_a->committing))
4371                 goto out_dec;
4372         if (local_read(&cpu_buffer_b->committing))
4373                 goto out_dec;
4374
4375         buffer_a->buffers[cpu] = cpu_buffer_b;
4376         buffer_b->buffers[cpu] = cpu_buffer_a;
4377
4378         cpu_buffer_b->buffer = buffer_a;
4379         cpu_buffer_a->buffer = buffer_b;
4380
4381         ret = 0;
4382
4383 out_dec:
4384         atomic_dec(&cpu_buffer_a->record_disabled);
4385         atomic_dec(&cpu_buffer_b->record_disabled);
4386 out:
4387         return ret;
4388 }
4389 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4390 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
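Under CONFIG_RING_BUFFER_ALLOW_SWAP, a snapshot then reduces to swapping the live cpu buffer with an idle spare of the same size (a sketch; the spare buffer and its sizing are the caller's responsibility):

static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)	/* -EINVAL, -EAGAIN or -EBUSY: retry later */
		return ret;

	/* "spare" now holds what @cpu recorded; read it at leisure */
	return 0;
}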
4391
4392 /**
4393  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4394  * @buffer: the buffer to allocate for.
4395  * @cpu: the cpu buffer to allocate.
4396  *
4397  * This function is used in conjunction with ring_buffer_read_page.
4398  * When reading a full page from the ring buffer, these functions
4399  * can be used to speed up the process. The calling function should
4400  * allocate a few pages first with this function. Then when it
4401  * needs to get pages from the ring buffer, it passes the result
4402  * of this function into ring_buffer_read_page, which will swap
4403  * the page that was allocated, with the read page of the buffer.
4404  *
4405  * Returns:
4406  *  The page allocated, or NULL on error.
4407  */
4408 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4409 {
4410         struct buffer_data_page *bpage;
4411         struct page *page;
4412
4413         page = alloc_pages_node(cpu_to_node(cpu),
4414                                 GFP_KERNEL | __GFP_NORETRY, 0);
4415         if (!page)
4416                 return NULL;
4417
4418         bpage = page_address(page);
4419
4420         rb_init_page(bpage);
4421
4422         return bpage;
4423 }
4424 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4425
4426 /**
4427  * ring_buffer_free_read_page - free an allocated read page
4428  * @buffer: the buffer the page was allocated for
4429  * @data: the page to free
4430  *
4431  * Free a page allocated from ring_buffer_alloc_read_page.
4432  */
4433 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4434 {
4435         free_page((unsigned long)data);
4436 }
4437 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4438
4439 /**
4440  * ring_buffer_read_page - extract a page from the ring buffer
4441  * @buffer: buffer to extract from
4442  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4443  * @len: amount to extract
4444  * @cpu: the cpu of the buffer to extract
4445  * @full: should the extraction only happen when the page is full.
4446  *
4447  * This function will pull out a page from the ring buffer and consume it.
4448  * @data_page must be the address of the variable that was returned
4449  * from ring_buffer_alloc_read_page. This is because the page might be used
4450  * to swap with a page in the ring buffer.
4451  *
4452  * for example:
4453  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4454  *      if (!rpage)
4455  *              return error;
4456  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4457  *      if (ret >= 0)
4458  *              process_page(rpage, ret);
4459  *
4460  * When @full is set, the function will not succeed unless
4461  * the writer is off the reader page.
4462  *
4463  * Note: it is up to the calling functions to handle sleeps and wakeups.
4464  *  The ring buffer can be used anywhere in the kernel and can not
4465  *  blindly call wake_up. The layer that uses the ring buffer must be
4466  *  responsible for that.
4467  *
4468  * Returns:
4469  *  >=0 if data has been transferred, returns the offset of consumed data.
4470  *  <0 if no data has been transferred.
4471  */
4472 int ring_buffer_read_page(struct ring_buffer *buffer,
4473                           void **data_page, size_t len, int cpu, int full)
4474 {
4475         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4476         struct ring_buffer_event *event;
4477         struct buffer_data_page *bpage;
4478         struct buffer_page *reader;
4479         unsigned long missed_events;
4480         unsigned long flags;
4481         unsigned int commit;
4482         unsigned int read;
4483         u64 save_timestamp;
4484         int ret = -1;
4485
4486         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4487                 goto out;
4488
4489         /*
4490          * If len is not big enough to hold the page header, then
4491          * we can not copy anything.
4492          */
4493         if (len <= BUF_PAGE_HDR_SIZE)
4494                 goto out;
4495
4496         len -= BUF_PAGE_HDR_SIZE;
4497
4498         if (!data_page)
4499                 goto out;
4500
4501         bpage = *data_page;
4502         if (!bpage)
4503                 goto out;
4504
4505         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4506
4507         reader = rb_get_reader_page(cpu_buffer);
4508         if (!reader)
4509                 goto out_unlock;
4510
4511         event = rb_reader_event(cpu_buffer);
4512
4513         read = reader->read;
4514         commit = rb_page_commit(reader);
4515
4516         /* Check if any events were dropped */
4517         missed_events = cpu_buffer->lost_events;
4518
4519         /*
4520          * If this page has been partially read or
4521          * if len is not big enough to read the rest of the page or
4522          * a writer is still on the page, then
4523          * we must copy the data from the page to the buffer.
4524          * Otherwise, we can simply swap the page with the one passed in.
4525          */
4526         if (read || (len < (commit - read)) ||
4527             cpu_buffer->reader_page == cpu_buffer->commit_page) {
4528                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4529                 unsigned int rpos = read;
4530                 unsigned int pos = 0;
4531                 unsigned int size;
4532
4533                 if (full)
4534                         goto out_unlock;
4535
4536                 if (len > (commit - read))
4537                         len = (commit - read);
4538
4539                 /* Always keep the time extend and data together */
4540                 size = rb_event_ts_length(event);
4541
4542                 if (len < size)
4543                         goto out_unlock;
4544
4545                 /* save the current timestamp, since the user will need it */
4546                 save_timestamp = cpu_buffer->read_stamp;
4547
4548                 /* Need to copy one event at a time */
4549                 do {
4550                         /* We need the size of one event, because
4551                          * rb_advance_reader only advances by one event,
4552                          * whereas rb_event_ts_length may include the size of
4553                          * one or two events.
4554                          * We have already ensured there's enough space if this
4555                          * is a time extend. */
4556                         size = rb_event_length(event);
4557                         memcpy(bpage->data + pos, rpage->data + rpos, size);
4558
4559                         len -= size;
4560
4561                         rb_advance_reader(cpu_buffer);
4562                         rpos = reader->read;
4563                         pos += size;
4564
4565                         if (rpos >= commit)
4566                                 break;
4567
4568                         event = rb_reader_event(cpu_buffer);
4569                         /* Always keep the time extend and data together */
4570                         size = rb_event_ts_length(event);
4571                 } while (len >= size);
4572
4573                 /* update bpage */
4574                 local_set(&bpage->commit, pos);
4575                 bpage->time_stamp = save_timestamp;
4576
4577                 /* we copied everything to the beginning */
4578                 read = 0;
4579         } else {
4580                 /* update the entry counter */
4581                 cpu_buffer->read += rb_page_entries(reader);
4582                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4583
4584                 /* swap the pages */
4585                 rb_init_page(bpage);
4586                 bpage = reader->page;
4587                 reader->page = *data_page;
4588                 local_set(&reader->write, 0);
4589                 local_set(&reader->entries, 0);
4590                 reader->read = 0;
4591                 *data_page = bpage;
4592
4593                 /*
4594                  * Use the real_end for the data size,
4595                  * This gives us a chance to store the lost events
4596                  * on the page.
4597                  */
4598                 if (reader->real_end)
4599                         local_set(&bpage->commit, reader->real_end);
4600         }
4601         ret = read;
4602
4603         cpu_buffer->lost_events = 0;
4604
4605         commit = local_read(&bpage->commit);
4606         /*
4607          * Set a flag in the commit field if we lost events
4608          */
4609         if (missed_events) {
4610                 /* If there is room at the end of the page to save the
4611                  * missed events, then record it there.
4612                  */
4613                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4614                         memcpy(&bpage->data[commit], &missed_events,
4615                                sizeof(missed_events));
4616                         local_add(RB_MISSED_STORED, &bpage->commit);
4617                         commit += sizeof(missed_events);
4618                 }
4619                 local_add(RB_MISSED_EVENTS, &bpage->commit);
4620         }
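        /*
         * A consumer of the returned page can undo this encoding. A rough
         * sketch, assuming the RB_MISSED_* bits used above ('missed' and
         * 'data_len' are illustrative locals, not part of this code):
         *
         *      commit = local_read(&bpage->commit);
         *      if (commit & RB_MISSED_EVENTS) {
         *              data_len = commit &
         *                      ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
         *              if (commit & RB_MISSED_STORED)
         *                      memcpy(&missed, &bpage->data[data_len],
         *                             sizeof(missed));
         *      }
         */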
4621
4622         /*
4623          * This page may be off to user land. Zero it out here.
4624          */
4625         if (commit < BUF_PAGE_SIZE)
4626                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4627
4628  out_unlock:
4629         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4630
4631  out:
4632         return ret;
4633 }
4634 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
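/*
 * A minimal usage sketch of the read-page interface (process_page() is
 * a hypothetical consumer, not part of this API). The spare page must
 * come from ring_buffer_alloc_read_page() so that the swap optimization
 * above may hand it straight into the ring buffer:
 *
 *      void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *      int ret;
 *
 *      if (!rpage)
 *              return -ENOMEM;
 *      ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *      if (ret >= 0)
 *              process_page(rpage, ret);
 *      ring_buffer_free_read_page(buffer, rpage);
 */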
4635
4636 #ifdef CONFIG_HOTPLUG_CPU
4637 static int rb_cpu_notify(struct notifier_block *self,
4638                          unsigned long action, void *hcpu)
4639 {
4640         struct ring_buffer *buffer =
4641                 container_of(self, struct ring_buffer, cpu_notify);
4642         long cpu = (long)hcpu;
4643         long nr_pages_same;
4644         int cpu_i;
4645         unsigned long nr_pages;
4646
4647         switch (action) {
4648         case CPU_UP_PREPARE:
4649         case CPU_UP_PREPARE_FROZEN:
4650                 if (cpumask_test_cpu(cpu, buffer->cpumask))
4651                         return NOTIFY_OK;
4652
4653                 nr_pages = 0;
4654                 nr_pages_same = 1;
4655                 /* check if all cpu sizes are the same */
4656                 for_each_buffer_cpu(buffer, cpu_i) {
4657                         /* fill in the size from the first enabled cpu */
4658                         if (nr_pages == 0)
4659                                 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4660                         if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4661                                 nr_pages_same = 0;
4662                                 break;
4663                         }
4664                 }
4665                 /* allocate minimum pages, user can later expand it */
4666                 if (!nr_pages_same)
4667                         nr_pages = 2;
4668                 buffer->buffers[cpu] =
4669                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4670                 if (!buffer->buffers[cpu]) {
4671                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4672                              cpu);
4673                         return NOTIFY_OK;
4674                 }
4675                 smp_wmb();
4676                 cpumask_set_cpu(cpu, buffer->cpumask);
4677                 break;
4678         case CPU_DOWN_PREPARE:
4679         case CPU_DOWN_PREPARE_FROZEN:
4680                 /*
4681                  * Do nothing.
4682                  *  If we were to free the buffer, then the user would
4683                  *  lose any trace that was in the buffer.
4684                  */
4685                 break;
4686         default:
4687                 break;
4688         }
4689         return NOTIFY_OK;
4690 }
4691 #endif
4692
4693 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4694 /*
4695  * This is a basic integrity check of the ring buffer.
4696  * When configured in, this test runs late in the boot cycle.
4697  * It will kick off a thread per CPU that will go into a loop
4698  * writing to the per cpu ring buffer various sizes of data.
4699  * Some of the data will be large items, some small.
4700  *
4701  * Another thread is created that goes into a spin, sending out
4702  * IPIs to the other CPUs to also write into the ring buffer.
4703  * This tests the nesting ability of the buffer.
4704  *
4705  * Basic stats are recorded and reported. If something unexpected
4706  * happens in the ring buffer, a big warning is displayed and all
4707  * ring buffers are disabled.
4708  */
4709 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4710
4711 struct rb_test_data {
4712         struct ring_buffer      *buffer;
4713         unsigned long           events;
4714         unsigned long           bytes_written;
4715         unsigned long           bytes_alloc;
4716         unsigned long           bytes_dropped;
4717         unsigned long           events_nested;
4718         unsigned long           bytes_written_nested;
4719         unsigned long           bytes_alloc_nested;
4720         unsigned long           bytes_dropped_nested;
4721         int                     min_size_nested;
4722         int                     max_size_nested;
4723         int                     max_size;
4724         int                     min_size;
4725         int                     cpu;
4726         int                     cnt;
4727 };
4728
4729 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4730
4731 /* 1 meg per cpu */
4732 #define RB_TEST_BUFFER_SIZE     1048576
4733
4734 static char rb_string[] __initdata =
4735         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4736         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4737         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4738
4739 static bool rb_test_started __initdata;
4740
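/*
 * Layout of each test event's payload: 'size' records how many bytes of
 * rb_string follow in 'str', so the verification pass below can check
 * both the length and the content of every event it consumes.
 */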
4741 struct rb_item {
4742         int size;
4743         char str[];
4744 };
4745
4746 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4747 {
4748         struct ring_buffer_event *event;
4749         struct rb_item *item;
4750         bool started;
4751         int event_len;
4752         int size;
4753         int len;
4754         int cnt;
4755
4756         /* Have nested writes differ from what is normally written */
4757         cnt = data->cnt + (nested ? 27 : 0);
4758
4759         /* Multiply cnt by ~e, to make some unique increment */
4760         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
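        /*
         * E.g. cnt = 1 gives size = 2 and cnt = 2 gives size = 5: each
         * step adds roughly e (68/25 ~= 2.72) bytes, wrapping at the
         * length of rb_string.
         */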
4761
4762         len = size + sizeof(struct rb_item);
4763
4764         started = rb_test_started;
4765         /* read rb_test_started before checking buffer enabled */
4766         smp_rmb();
4767
4768         event = ring_buffer_lock_reserve(data->buffer, len);
4769         if (!event) {
4770                 /* Ignore dropped events before test starts. */
4771                 if (started) {
4772                         if (nested)
4773                                 data->bytes_dropped_nested += len;
4774                         else
4775                                 data->bytes_dropped += len;
4776                 }
4777                 return len;
4778         }
4779
4780         event_len = ring_buffer_event_length(event);
4781
4782         if (RB_WARN_ON(data->buffer, event_len < len))
4783                 goto out;
4784
4785         item = ring_buffer_event_data(event);
4786         item->size = size;
4787         memcpy(item->str, rb_string, size);
4788
4789         if (nested) {
4790                 data->bytes_alloc_nested += event_len;
4791                 data->bytes_written_nested += len;
4792                 data->events_nested++;
4793                 if (!data->min_size_nested || len < data->min_size_nested)
4794                         data->min_size_nested = len;
4795                 if (len > data->max_size_nested)
4796                         data->max_size_nested = len;
4797         } else {
4798                 data->bytes_alloc += event_len;
4799                 data->bytes_written += len;
4800                 data->events++;
4801                 if (!data->min_size || len < data->min_size)
4802                         data->min_size = len;
4803                 if (len > data->max_size)
4804                         data->max_size = len;
4805         }
4806
4807  out:
4808         ring_buffer_unlock_commit(data->buffer, event);
4809
4810         return 0;
4811 }
4812
4813 static __init int rb_test(void *arg)
4814 {
4815         struct rb_test_data *data = arg;
4816
4817         while (!kthread_should_stop()) {
4818                 rb_write_something(data, false);
4819                 data->cnt++;
4820
4821                 set_current_state(TASK_INTERRUPTIBLE);
4822                 /* Sleep a min of 100, 200, or 300us (based on cnt), max of 1ms */
4823                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4824         }
4825
4826         return 0;
4827 }
4828
4829 static __init void rb_ipi(void *ignore)
4830 {
4831         struct rb_test_data *data;
4832         int cpu = smp_processor_id();
4833
4834         data = &rb_data[cpu];
4835         rb_write_something(data, true);
4836 }
4837
4838 static __init int rb_hammer_test(void *arg)
4839 {
4840         while (!kthread_should_stop()) {
4841
4842                 /* Send an IPI to all cpus to write data! */
4843                 smp_call_function(rb_ipi, NULL, 1);
4844                 /* No sleep, but on non-preemptive kernels, let others run */
4845                 schedule();
4846         }
4847
4848         return 0;
4849 }
4850
4851 static __init int test_ringbuffer(void)
4852 {
4853         struct task_struct *rb_hammer;
4854         struct ring_buffer *buffer;
4855         int cpu;
4856         int ret = 0;
4857
4858         pr_info("Running ring buffer tests...\n");
4859
4860         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4861         if (WARN_ON(!buffer))
4862                 return 0;
4863
4864         /* Disable buffer so that threads can't write to it yet */
4865         ring_buffer_record_off(buffer);
4866
4867         for_each_online_cpu(cpu) {
4868                 rb_data[cpu].buffer = buffer;
4869                 rb_data[cpu].cpu = cpu;
4870                 rb_data[cpu].cnt = cpu;
4871                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4872                                                  "rbtester/%d", cpu);
4873                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4874                         pr_cont("FAILED\n");
4875                         ret = PTR_ERR(rb_threads[cpu]);
4876                         goto out_free;
4877                 }
4878
4879                 kthread_bind(rb_threads[cpu], cpu);
4880                 wake_up_process(rb_threads[cpu]);
4881         }
4882
4883         /* Now create the rb hammer! */
4884         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4885         if (WARN_ON(IS_ERR(rb_hammer))) {
4886                 pr_cont("FAILED\n");
4887                 ret = PTR_ERR(rb_hammer);
4888                 goto out_free;
4889         }
4890
4891         ring_buffer_record_on(buffer);
4892         /*
4893          * Make sure the buffer is seen as enabled before setting
4894          * rb_test_started; this smp_wmb() pairs with the smp_rmb() in
4895          * rb_write_something(). There is still a small race window where
4896          * events could be dropped and the thread won't catch it, but when
4897          * a ring buffer gets enabled there is always some delay before
4898          * other CPUs see it. Thus, we only care about events dropped
4899          * after the threads see that the buffer is active.
4900          */
4901         smp_wmb();
4902         rb_test_started = true;
4903
4904         set_current_state(TASK_INTERRUPTIBLE);
4905         /* Just run for 10 seconds */
4906         schedule_timeout(10 * HZ);
4907
4908         kthread_stop(rb_hammer);
4909
4910  out_free:
4911         for_each_online_cpu(cpu) {
4912                 if (IS_ERR_OR_NULL(rb_threads[cpu]))
4913                         break;
4914                 kthread_stop(rb_threads[cpu]);
4915         }
4916         if (ret) {
4917                 ring_buffer_free(buffer);
4918                 return ret;
4919         }
4920
4921         /* Report! */
4922         pr_info("finished\n");
4923         for_each_online_cpu(cpu) {
4924                 struct ring_buffer_event *event;
4925                 struct rb_test_data *data = &rb_data[cpu];
4926                 struct rb_item *item;
4927                 unsigned long total_events;
4928                 unsigned long total_dropped;
4929                 unsigned long total_written;
4930                 unsigned long total_alloc;
4931                 unsigned long total_read = 0;
4932                 unsigned long total_size = 0;
4933                 unsigned long total_len = 0;
4934                 unsigned long total_lost = 0;
4935                 unsigned long lost;
4936                 int big_event_size;
4937                 int small_event_size;
4938
4939                 ret = -1;
4940
4941                 total_events = data->events + data->events_nested;
4942                 total_written = data->bytes_written + data->bytes_written_nested;
4943                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4944                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4945
4946                 big_event_size = data->max_size + data->max_size_nested;
4947                 small_event_size = data->min_size + data->min_size_nested;
4948
4949                 pr_info("CPU %d:\n", cpu);
4950                 pr_info("              events:    %ld\n", total_events);
4951                 pr_info("       dropped bytes:    %ld\n", total_dropped);
4952                 pr_info("       alloced bytes:    %ld\n", total_alloc);
4953                 pr_info("       written bytes:    %ld\n", total_written);
4954                 pr_info("       biggest event:    %d\n", big_event_size);
4955                 pr_info("      smallest event:    %d\n", small_event_size);
4956
4957                 if (RB_WARN_ON(buffer, total_dropped))
4958                         break;
4959
4960                 ret = 0;
4961
4962                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4963                         total_lost += lost;
4964                         item = ring_buffer_event_data(event);
4965                         total_len += ring_buffer_event_length(event);
4966                         total_size += item->size + sizeof(struct rb_item);
4967                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4968                                 pr_info("FAILED!\n");
4969                                 pr_info("buffer had: %.*s\n", item->size, item->str);
4970                                 pr_info("expected:   %.*s\n", item->size, rb_string);
4971                                 RB_WARN_ON(buffer, 1);
4972                                 ret = -1;
4973                                 break;
4974                         }
4975                         total_read++;
4976                 }
4977                 if (ret)
4978                         break;
4979
4980                 ret = -1;
4981
4982                 pr_info("         read events:   %ld\n", total_read);
4983                 pr_info("         lost events:   %ld\n", total_lost);
4984                 pr_info("        total events:   %ld\n", total_lost + total_read);
4985                 pr_info("  recorded len bytes:   %ld\n", total_len);
4986                 pr_info(" recorded size bytes:   %ld\n", total_size);
4987                 if (total_lost)
4988                         pr_info(" With dropped events, recorded len and size may not match\n"
4989                                 " the alloced and written totals above\n");
4990                 if (!total_lost) {
4991                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
4992                                        total_size != total_written))
4993                                 break;
4994                 }
4995                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4996                         break;
4997
4998                 ret = 0;
4999         }
5000         if (!ret)
5001                 pr_info("Ring buffer PASSED!\n");
5002
5003         ring_buffer_free(buffer);
5004         return 0;
5005 }
5006
5007 late_initcall(test_ringbuffer);
5008 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */