/*
 *	Definitions for the 'struct ptr_ring' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming entries from the FIFO.
 *
 *	This implementation tries to minimize cache-contention when there is a
 *	single producer and a single consumer CPU.
 */

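/*
 * Example usage (an illustrative sketch, not part of the original
 * header): one CPU enqueues pointers while another dequeues them.
 * The ring stores the pointers themselves and uses NULL to mark a
 * free slot, which is why a NULL pointer must never be produced.
 * process() is a hypothetical callback.
 *
 *	producer CPU:
 *		err = ptr_ring_produce(&ring, item);
 *		if (err)
 *			back off and retry: the ring is full
 *
 *	consumer CPU:
 *		item = ptr_ring_consume(&ring);
 *		if (item)
 *			process(item);
 */
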
#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

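/*
 * Example (a sketch, not part of the original header): spinning until
 * a slot frees up, with producer_lock held as the note above requires
 * for resizable rings; cpu_relax() supplies the compiler barrier.
 * Spinning with the lock held is only sensible for short waits.
 *
 *	spin_lock(&r->producer_lock);
 *	while (__ptr_ring_full(r))
 *		cpu_relax();
 *	__ptr_ring_produce(r, ptr);
 *	spin_unlock(&r->producer_lock);
 */
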
static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}

static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	return r->queue[r->consumer];
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}

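/*
 * Example (a sketch, not part of the original header): draining every
 * queued entry, the same pattern ptr_ring_resize() and
 * ptr_ring_cleanup() below are built on.  process() is a hypothetical
 * callback.
 *
 *	void *ptr;
 *
 *	while ((ptr = ptr_ring_consume(&ring)))
 *		process(ptr);
 */
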
/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})

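/*
 * Example (a sketch, not part of the original header): reading a field
 * of the head entry without consuming it.  struct item and item_len()
 * are hypothetical; item_len() must tolerate NULL, since
 * __ptr_ring_peek() returns NULL when the ring is empty.
 *
 *	static int item_len(struct item *i)
 *	{
 *		return i ? i->len : 0;
 *	}
 *
 *	int len = PTR_RING_PEEK_CALL(&ring, item_len);
 */
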
static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
{
	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	int producer = 0;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;
	void *ptr;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->producer_lock, flags);

	/* Move entries over; those that don't fit the new size are destroyed. */
	while ((ptr = ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	r->size = size;
	r->producer = producer;
	r->consumer = 0;
	old = r->queue;
	r->queue = queue;

	spin_unlock_irqrestore(&(r)->producer_lock, flags);

	kfree(old);

	return 0;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}

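/*
 * A minimal round-trip sketch (not part of the original header) tying
 * the API together: init, produce, consume, cleanup.  The function
 * name ptr_ring_example_roundtrip is hypothetical.
 */
static inline int ptr_ring_example_roundtrip(void *item)
{
	struct ptr_ring ring;
	void *out;
	int err;

	err = ptr_ring_init(&ring, 64, GFP_KERNEL);
	if (err)
		return err;

	/* Fails with -ENOSPC once all 64 slots hold non-NULL entries. */
	err = ptr_ring_produce(&ring, item);
	if (!err) {
		out = ptr_ring_consume(&ring);	/* FIFO order: out == item */
		if (out != item)
			err = -EINVAL;
	}

	/* NULL destroy callback: any remaining entries are simply dropped. */
	ptr_ring_cleanup(&ring, NULL);
	return err;
}
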
#endif /* _LINUX_PTR_RING_H */