tools/virtio: add ringtest utilities
tools/virtio/ringtest/virtio_ring_0_9.c
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Partial implementation of virtio 0.9. The event index is used for
 * signalling, unconditionally. The design roughly follows the Linux kernel
 * implementation, so that its performance can be judged against it.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <linux/virtio_ring.h>

struct data {
        void *data;
} *data;

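/* The virtio 0.9 split ring: a descriptor table, an avail ring written by
 * the guest, and a used ring written by the host.  vring_init() places all
 * three in a single contiguous, page-aligned allocation.
 */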
struct vring ring;

/* Enabling the define below activates the experimental ring polling code,
 * which skips index reads on the consumer side in favor of checking the
 * high bits of the ring id XOR 0x8000.
 */
/* #define RING_POLL */
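
/*
 * The RING_POLL encoding, by example (assume ring_size = 256, so the low
 * 8 bits index the ring): each avail ring entry stores
 *      (head | (avail & ~0xff)) ^ 0x8000
 * i.e. the descriptor head in the low bits and the avail counter's high
 * bits, flipped by 0x8000, in the rest.  A consumer expecting avail index
 * 5 tests (entry ^ 5 ^ 0x8000) & ~0xff: zero for a freshly published
 * slot, nonzero for a zero-initialized or previous-lap entry.  The
 * consumer thus polls a single ring slot instead of the shared index
 * cache line.  The host copies the encoded id into the used ring, so
 * get_buf() applies the same test on the used side.
 */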

/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80

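/*
 * Barrier pairing in this file (matching the A-D labels below):
 *   A: the guest's smp_release() on avail ring writes in add_inbuf()
 *      pairs with the host's smp_acquire() in use_buf().
 *   B: the host's smp_release() on used ring writes in use_buf()
 *      pairs with the guest's smp_acquire() in get_buf().
 *   C: smp_mb() in kick_available() (avail idx write vs. avail event
 *      read) pairs with smp_mb() in enable_kick().
 *   D: smp_mb() in call_used() (used idx write vs. used event read)
 *      pairs with smp_mb() in enable_call().
 */
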
struct guest {
        unsigned short avail_idx;
        unsigned short last_used_idx;
        unsigned short num_free;
        unsigned short kicked_avail_idx;
        unsigned short free_head;
        unsigned char reserved[HOST_GUEST_PADDING - 10];
} guest;

struct host {
        /* we do not need to track last avail index
         * unless we have more than one in flight.
         */
        unsigned short used_idx;
        unsigned short called_used_idx;
        unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;

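/* A compile-time sanity check, assuming a C11 toolchain: reserved[] above
 * is sized so that each struct fills exactly one HOST_GUEST_PADDING unit,
 * keeping guest and host state on separate cache lines.
 */
_Static_assert(sizeof(struct guest) == HOST_GUEST_PADDING,
               "struct guest must fill one padding unit");
_Static_assert(sizeof(struct host) == HOST_GUEST_PADDING,
               "struct host must fill one padding unit");
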
/* implemented by ring */
void alloc_ring(void)
{
        int ret;
        int i;
        void *p;

        ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
        if (ret) {
                /* posix_memalign() returns the error, it does not set errno */
                errno = ret;
                perror("Unable to allocate ring buffer");
                exit(3);
        }
        memset(p, 0, vring_size(ring_size, 0x1000));
        vring_init(&ring, ring_size, p, 0x1000);

        guest.avail_idx = 0;
        guest.kicked_avail_idx = -1;
        guest.last_used_idx = 0;
        /* Put everything in free lists. */
        guest.free_head = 0;
        for (i = 0; i < ring_size - 1; i++)
                ring.desc[i].next = i + 1;
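        /* desc[ring_size - 1].next stays 0 from the memset above; it is
         * never followed because num_free reaches 0 first.
         */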
        host.used_idx = 0;
        host.called_used_idx = -1;
        guest.num_free = ring_size;
        data = malloc(ring_size * sizeof *data);
        if (!data) {
                perror("Unable to allocate data buffer");
                exit(3);
        }
        memset(data, 0, ring_size * sizeof *data);
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
        unsigned head, avail;
        struct vring_desc *desc;

        if (!guest.num_free)
                return -1;

        head = guest.free_head;
        guest.num_free--;

        desc = ring.desc;
        desc[head].flags = VRING_DESC_F_NEXT;
        desc[head].addr = (unsigned long)(void *)buf;
        desc[head].len = len;
        /* Set then clear VRING_DESC_F_NEXT to simulate the flag
         * flipping we would have to do with a multi-descriptor chain.
         */
        desc[head].flags &= ~VRING_DESC_F_NEXT;
        guest.free_head = desc[head].next;

        data[head].data = datap;

#ifdef RING_POLL
        /* Barrier A (for pairing) */
        smp_release();
        avail = guest.avail_idx++;
        ring.avail->ring[avail & (ring_size - 1)] =
                (head | (avail & ~(ring_size - 1))) ^ 0x8000;
#else
        avail = (ring_size - 1) & (guest.avail_idx++);
        ring.avail->ring[avail] = head;
        /* Barrier A (for pairing) */
        smp_release();
#endif
        ring.avail->idx = guest.avail_idx;
        return 0;
}
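
/*
 * A minimal sketch of the guest-side flow, assuming the driver loop in
 * the test harness's main.c (the names below are illustrative only):
 *
 *      while (started < bufs && add_inbuf(buf_len, buf, (void *)1) == 0)
 *              started++;
 *      kick_available();
 *      ...
 *      while ((datap = get_buf(&len, &bufp)))
 *              completed++;
 */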

void *get_buf(unsigned *lenp, void **bufp)
{
        unsigned head;
        unsigned index;
        void *datap;

#ifdef RING_POLL
        head = (ring_size - 1) & guest.last_used_idx;
        index = ring.used->ring[head].id;
        if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
                return NULL;
        /* Barrier B (for pairing) */
        smp_acquire();
        index &= ring_size - 1;
#else
        if (ring.used->idx == guest.last_used_idx)
                return NULL;
        /* Barrier B (for pairing) */
        smp_acquire();
        head = (ring_size - 1) & guest.last_used_idx;
        index = ring.used->ring[head].id;
#endif
        *lenp = ring.used->ring[head].len;
        datap = data[index].data;
        *bufp = (void *)(unsigned long)ring.desc[index].addr;
        data[index].data = NULL;
        ring.desc[index].next = guest.free_head;
        guest.free_head = index;
        guest.num_free++;
        guest.last_used_idx++;
        return datap;
}
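
/* Note that get_buf() pushes the completed descriptor back onto the head
 * of the free list, so descriptors are recycled LIFO; with a single
 * buffer in flight the same descriptor and cache lines are reused on
 * every iteration.
 */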

void poll_used(void)
{
#ifdef RING_POLL
        unsigned head = (ring_size - 1) & guest.last_used_idx;

        for (;;) {
                unsigned index = ring.used->ring[head].id;

                if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
                        busy_wait();
                else
                        break;
        }
#else
        unsigned head = guest.last_used_idx;

        while (ring.used->idx == head)
                busy_wait();
#endif
}

void disable_call(void)
{
        /* Doing nothing to disable calls might cause
         * extra interrupts, but reduces the number of cache misses.
         */
}

bool enable_call(void)
{
        unsigned short last_used_idx;

        vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
        /* Flush call index write */
        /* Barrier D (for pairing) */
        smp_mb();
#ifdef RING_POLL
        {
                unsigned short head = last_used_idx & (ring_size - 1);
                unsigned index = ring.used->ring[head].id;

                return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
        }
#else
        return ring.used->idx == last_used_idx;
#endif
}
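
/* enable_call() returns true when no used buffers are pending at
 * last_used_idx, i.e. it is safe to wait for the host's call; false means
 * a completion raced with enabling it, and the caller must recheck before
 * sleeping.
 */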

void kick_available(void)
{
        /* Flush out the avail index write before reading the event index */
        /* Barrier C (for pairing) */
        smp_mb();
        if (!vring_need_event(vring_avail_event(&ring),
                              guest.avail_idx,
                              guest.kicked_avail_idx))
                return;

        guest.kicked_avail_idx = guest.avail_idx;
        kick();
}
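
/*
 * For reference, vring_need_event(event_idx, new, old) is defined in
 * linux/virtio_ring.h as
 *
 *      (u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * i.e. notify iff the other side's event index lies in the half-open
 * window [old, new).  Example: old = 5, new = 8 kicks for an event index
 * of 5, 6 or 7, and stays quiet otherwise.
 */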

/* host side */
void disable_kick(void)
{
        /* Doing nothing to disable kicks might cause
         * extra interrupts, but reduces the number of cache misses.
         */
}

bool enable_kick(void)
{
        unsigned head = host.used_idx;

        vring_avail_event(&ring) = head;
        /* Barrier C (for pairing) */
        smp_mb();
#ifdef RING_POLL
        {
                unsigned index = ring.avail->ring[head & (ring_size - 1)];

                return (index ^ head ^ 0x8000) & ~(ring_size - 1);
        }
#else
        return head == ring.avail->idx;
#endif
}
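
/* The mirror of enable_call(): true means no avail buffers are pending at
 * host.used_idx, so the host may wait for a kick; false means the guest
 * raced ahead and the host must recheck before sleeping.
 */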

void poll_avail(void)
{
        unsigned head = host.used_idx;
#ifdef RING_POLL
        for (;;) {
                unsigned index = ring.avail->ring[head & (ring_size - 1)];

                if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
                        busy_wait();
                else
                        break;
        }
#else
        while (ring.avail->idx == head)
                busy_wait();
#endif
}

bool use_buf(unsigned *lenp, void **bufp)
{
        unsigned used_idx = host.used_idx;
        struct vring_desc *desc;
        unsigned head;

#ifdef RING_POLL
        head = ring.avail->ring[used_idx & (ring_size - 1)];
        if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
                return false;
        /* Barrier A (for pairing) */
        smp_acquire();

        used_idx &= ring_size - 1;
        desc = &ring.desc[head & (ring_size - 1)];
#else
        if (used_idx == ring.avail->idx)
                return false;

        /* Barrier A (for pairing) */
        smp_acquire();

        used_idx &= ring_size - 1;
        head = ring.avail->ring[used_idx];
        desc = &ring.desc[head];
#endif

        *lenp = desc->len;
        *bufp = (void *)(unsigned long)desc->addr;

        /* now update used ring */
        ring.used->ring[used_idx].id = head;
        ring.used->ring[used_idx].len = desc->len - 1;
        /* Barrier B (for pairing) */
        smp_release();
        host.used_idx++;
        ring.used->idx = host.used_idx;

        return true;
}
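
/*
 * A minimal sketch of the host-side flow, again assuming the harness's
 * device loop in main.c (illustrative only):
 *
 *      for (;;) {
 *              poll_avail();
 *              while (use_buf(&len, &buf))
 *                      ;
 *              call_used();
 *      }
 */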

void call_used(void)
{
        /* Flush out the used index write before reading the event index */
        /* Barrier D (for pairing) */
        smp_mb();
        if (!vring_need_event(vring_used_event(&ring),
                              host.used_idx,
                              host.called_used_idx))
                return;

        host.called_used_idx = host.used_idx;
        call();
}