#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

/* Userspace stand-ins for the kernel definitions that ptr_ring.h expects. */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
/* Allocation flags are ignored: emulate kzalloc with memalign + memset. */
static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);

	if (!p)
		return p;
	memset(p, 0, size);
	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}
/* Kernel spinlock API mapped onto pthread spinlocks. */
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);

	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);

	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);

	assert(!ret);
}
/*
 * No interrupts or softirqs to mask in userspace: the _bh, _irq and
 * _irqsave variants all collapse to the plain spinlock.
 */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
#include "../../../include/linux/ptr_ring.h"
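
/*
 * Producer-side accounting: headcnt counts buffers produced via
 * add_inbuf(), tailcnt counts buffers reported back through get_buf();
 * get_buf() fakes completions from the difference (see below).
 */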
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;
/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);

	assert(!ret);
}
/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}
/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed; fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}
/* Spin until get_buf() would have a completion to report. */
void poll_used(void)
{
	void *b;

	do {
		if (tailcnt == headcnt || __ptr_ring_full(&array)) {
			b = NULL;
			barrier();
		} else {
			b = "Buffer\n";
		}
	} while (!b);
}
/* Notification is not exercised by this test; trap if it is ever used. */
void kick_available(void)
{
	assert(0);
}
/* host side: spin until a buffer is available to consume. */
void poll_avail(void)
{
	void *b;

	do {
		barrier();
		b = __ptr_ring_peek(&array);
	} while (!b);
}
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}
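
/*
 * Hypothetical standalone smoke test, not part of the harness: main.c
 * normally defines ring_size and drives the callbacks above from separate
 * guest and host threads.  Build with -DPTR_RING_SMOKE_TEST and without
 * main.c; the ring_size definition below stands in for main.c's knob and
 * its type is an assumption.  Everything runs on one thread, so the
 * lockless __ptr_ring_* paths used above are safe.
 */
#ifdef PTR_RING_SMOKE_TEST
unsigned ring_size = 256;

int main(void)
{
	unsigned len;
	void *data;

	alloc_ring();
	/* guest: produce one buffer */
	assert(!add_inbuf(6, "hello", NULL));
	/* host: wait for the buffer, then consume it */
	poll_avail();
	assert(use_buf(&len, &data));
	/* guest: poll_used/get_buf fake completion from the counters */
	poll_used();
	assert(get_buf(&len, &data));
	return 0;
}
#endif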