perf/core: Free AUX pages in unmap path
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 1faad2c..367e9c5 100644
@@ -221,8 +221,6 @@ void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
-static void rb_irq_work(struct irq_work *work);
-
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -243,16 +241,6 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
-       init_irq_work(&rb->irq_work, rb_irq_work);
-}
-
-static void ring_buffer_put_async(struct ring_buffer *rb)
-{
-       if (!atomic_dec_and_test(&rb->refcount))
-               return;
-
-       rb->rcu_head.next = (void *)rb;
-       irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -287,6 +275,13 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
                goto err;
 
+       /*
+        * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
+        * the aux buffer is in perf_mmap_close(), about to get freed.
+        */
+       if (!atomic_read(&rb->aux_mmap_count))
+               goto err_put;
+
        /*
         * Nesting is not supported for AUX area, make sure nested
         * writers are caught early
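The hunk above closes a race between a PMU driver starting an AUX transaction and perf_mmap_close() tearing the buffer down: the writer first takes a reference with atomic_inc_not_zero(), then re-checks aux_mmap_count and backs out through the err_put path if the unmap has already begun. Below is a minimal userspace sketch of that acquire-side pattern, using C11 atomics rather than the kernel's atomic_t API; struct buf, inc_not_zero() and aux_output_begin() are made-up names for illustration only, not the kernel code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct buf {
		atomic_int aux_refcount;	/* held by in-flight AUX transactions */
		atomic_int aux_mmap_count;	/* dropped to 0 by the unmap path */
	};

	/* Take a reference only if the count is already non-zero. */
	static bool inc_not_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old != 0)
			if (atomic_compare_exchange_weak(v, &old, old + 1))
				return true;
		return false;
	}

	static bool aux_output_begin(struct buf *b)
	{
		if (!inc_not_zero(&b->aux_refcount))
			return false;			/* buffer already gone */

		/* Re-check after taking the reference: if the unmap path has
		 * already dropped aux_mmap_count to 0, undo and bail out. */
		if (atomic_load(&b->aux_mmap_count) == 0) {
			atomic_fetch_sub(&b->aux_refcount, 1);
			return false;
		}
		return true;				/* safe to start writing */
	}

	int main(void)
	{
		struct buf b;

		atomic_init(&b.aux_refcount, 1);	/* refcount still live */
		atomic_init(&b.aux_mmap_count, 0);	/* but unmap has started */
		printf("begin allowed: %d\n", aux_output_begin(&b));	/* prints 0 */
		return 0;
	}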
@@ -331,7 +326,7 @@ err_put:
        rb_free_aux(rb);
 
 err:
-       ring_buffer_put_async(rb);
+       ring_buffer_put(rb);
        handle->event = NULL;
 
        return NULL;
@@ -382,7 +377,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
        local_set(&rb->aux_nest, 0);
        rb_free_aux(rb);
-       ring_buffer_put_async(rb);
+       ring_buffer_put(rb);
 }
 
 /*
@@ -463,6 +458,14 @@ static void __rb_free_aux(struct ring_buffer *rb)
 {
        int pg;
 
+       /*
+        * Should never happen, the last reference should be dropped from
+        * perf_mmap_close() path, which first stops aux transactions (which
+        * in turn are the atomic holders of aux_refcount) and then does the
+        * last rb_free_aux().
+        */
+       WARN_ON_ONCE(in_atomic());
+
        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
@@ -574,18 +577,7 @@ out:
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
-               irq_work_queue(&rb->irq_work);
-}
-
-static void rb_irq_work(struct irq_work *work)
-{
-       struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
-
-       if (!atomic_read(&rb->aux_refcount))
                __rb_free_aux(rb);
-
-       if (rb->rcu_head.next == (void *)rb)
-               call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
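The two hunks above take the final free out of the irq_work deferral: the last rb_free_aux() now calls __rb_free_aux() synchronously, and since that free path is not safe to run from atomic context, __rb_free_aux() gains the WARN_ON_ONCE(in_atomic()) check; the last reference is expected to be dropped from the perf_mmap_close() path in process context. Continuing the userspace sketch above (same made-up struct buf; free_aux_pages() is a hypothetical stand-in for __rb_free_aux()), the release side reduces to a plain dec-and-test:

	static void free_aux_pages(struct buf *b)
	{
		/* stand-in for __rb_free_aux(): would unpin and free the pages */
		(void)b;
	}

	static void aux_output_end(struct buf *b)
	{
		/* atomic_fetch_sub() returns the old value, so 1 means this was
		 * the last reference; free directly instead of queueing irq_work. */
		if (atomic_fetch_sub(&b->aux_refcount, 1) == 1)
			free_aux_pages(b);
	}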
@@ -746,8 +738,10 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 
        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
-       rb->page_order = ilog2(nr_pages);
-       rb->nr_pages = !!nr_pages;
+       if (nr_pages) {
+               rb->nr_pages = 1;
+               rb->page_order = ilog2(nr_pages);
+       }
 
        ring_buffer_init(rb, watermark, flags);
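The last hunk guards the vmalloc'd case against a zero-sized data area: both fields stay zero when nr_pages is 0, which also avoids evaluating ilog2(0), documented as undefined in the kernel. For a non-zero request the whole data area is a single virtually contiguous allocation, so nr_pages collapses to 1 and page_order carries the size as 2^order pages. A quick plain-C illustration of that mapping; ilog2_u() is a stand-in for the kernel's ilog2(), not the real helper:

	#include <stdio.h>

	/* Stand-in for the kernel's ilog2(); like the real one, only
	 * meaningful for a non-zero argument. */
	static unsigned int ilog2_u(unsigned long v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long requests[] = { 0, 1, 4, 16 };

		for (int i = 0; i < 4; i++) {
			unsigned long nr_pages = 0, page_order = 0;

			if (requests[i]) {		/* mirrors the hunk above */
				nr_pages = 1;		/* one contiguous area...   */
				page_order = ilog2_u(requests[i]); /* ...of 2^order pages */
			}
			printf("requested=%lu -> nr_pages=%lu page_order=%lu\n",
			       requests[i], nr_pages, page_order);
		}
		return 0;
	}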