drivers/staging/greybus/loopback.c
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct kfifo kfifo_ts;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	wait_queue_head_t wq;

	/* Per connection stats */
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gpbridge_firmware_latency;

	int type;
	u32 mask;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int ms_wait;
	u32 error;
	u32 lbid;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gpbridge_latency_ts;
};

#define GB_LOOPBACK_FIFO_DEFAULT			8192

static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);
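
/*
 * Note: each test iteration logs one u32 latency sample to kfifo_lat and a
 * pair of struct timeval timestamps to kfifo_ts (see
 * gb_loopback_connection_init()), so kfifo_depth bounds how many iterations
 * can be captured between reads of the debugfs file.
 */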

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_MS_WAIT_MAX				1000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_bundle *bundle;				\
	struct gb_loopback *gb;					\
	bundle = to_gb_bundle(dev);				\
	gb = bundle->private;					\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)
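
/*
 * For example, gb_loopback_ro_attr(error) expands to an error_show() handler
 * plus DEVICE_ATTR_RO(error), exposing a read-only "error" file in the
 * bundle's sysfs directory. The stats and avg macros below follow the same
 * pattern for the min/max/avg values of each gb_loopback_stats field.
 */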

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_bundle *bundle;				\
	struct gb_loopback *gb;					\
	bundle = to_gb_bundle(dev);				\
	gb = bundle->private;					\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_bundle *bundle;				\
	struct gb_loopback *gb;					\
	u64 avg;						\
	u32 count, rem;						\
	bundle = to_gb_bundle(dev);				\
	gb = bundle->private;					\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2;	/* round closest */	\
	rem = do_div(avg, count);				\
	return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
}								\
static DEVICE_ATTR_RO(name##_avg)
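
/*
 * name_avg reports a decimal average of sum / count: "count / 2" is added
 * first so the integer part rounds to the nearest unit, do_div() then leaves
 * the quotient in avg and returns the remainder, which is scaled to six
 * decimal places for the fractional part. count is forced to at least 1 to
 * avoid a division by zero before any samples are taken.
 */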

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_bundle *bundle = to_gb_bundle(dev);			\
	struct gb_loopback *gb = bundle->private;			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_bundle *bundle = to_gb_bundle(dev);			\
	struct gb_loopback *gb = bundle->private;			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb, bundle);			\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_bundle *bundle = to_gb_bundle(dev);			\
	struct gb_loopback *gb = bundle->private;			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_bundle *bundle = to_gb_bundle(dev);			\
	struct gb_loopback *gb = bundle->private;			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_bundle *bundle = to_gb_bundle(dev);			\
	struct gb_loopback *gb = bundle->private;			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb, bundle);			\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb,
				   struct gb_bundle *bundle)
{
	if (gb->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
		gb->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->iteration_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(&bundle->dev,
			 "cannot log all %u iterations, kfifo_depth is only %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);
	kfifo_reset_out(&gb->kfifo_ts);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gpbridge_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *					    payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(ms_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A bit-mask of destination connections to include in the test run */
gb_dev_loopback_rw_attr(mask, u);
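
/*
 * Typical user-space sequence (illustrative only; the exact sysfs directory
 * depends on the bundle device name, e.g. somewhere under
 * /sys/bus/greybus/devices/):
 *
 *	echo 128  > size		# payload bytes per transfer
 *	echo 1000 > iteration_max	# stop after 1000 operations
 *	echo 3    > type		# start a transfer test
 *
 * Writing "type" goes through gb_loopback_check_attr(), which resets the
 * statistics and wakes the per-connection worker thread.
 */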

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gpbridge_firmware_latency_min.attr,
	&dev_attr_gpbridge_firmware_latency_max.attr,
	&dev_attr_gpbridge_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_ms_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_mask.attr,
	&dev_attr_error.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	u32 lat;

	do_div(elapsed_nsecs, NSEC_PER_USEC);
	lat = elapsed_nsecs;
	return lat;
}

static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else	/* assume the timestamp wrapped within the day */
		return NSEC_PER_DAY - t1 + t2;
}

static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
{
	u64 t1, t2;

	t1 = timeval_to_ns(ts);
	t2 = timeval_to_ns(te);

	return __gb_loopback_calc_latency(t1, t2);
}

static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
					struct timeval *ts, struct timeval *te)
{
	kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
	kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
}

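/*
 * Send a single loopback operation and wait for its response. The two
 * do_gettimeofday() calls bracket the whole exchange (including operation
 * allocation and teardown), and the resulting round-trip time is stored in
 * gb->elapsed_nsecs and pushed to the timestamp kfifo.
 */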
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	struct timeval ts, te;
	int ret;

	do_gettimeofday(&ts);
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		ret = -ENOMEM;
		goto error;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
		}
	}

	gb_operation_put(operation);

error:
	do_gettimeofday(&te);

	/* Calculate the total time the message took */
	gb_loopback_push_latency_ts(gb, &ts, &te);
	gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);

	return ret;
}

static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gpbridge_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gpbridge_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

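/*
 * Incoming (module-initiated) request handler: ping and sink requests need
 * no response payload, while a transfer request gets its payload echoed back
 * in the response.
 */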
static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		dev_err(dev, "module-initiated version operation\n");
		return -EINVAL;
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL; /* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gpbridge_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u32 req = USEC_PER_SEC;

	do_div(req, latency);
	gb_loopback_update_stats(&gb->requests_per_second, req);
}

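/*
 * Convert the measured per-operation latency (in microseconds) into a
 * bytes-per-second figure. aggregate_size counts the two operation message
 * headers plus the request/response structures and payload actually carried
 * for the current test type.
 */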
static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u32 throughput;
	u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	/* Calculate bytes per second */
	throughput = USEC_PER_SEC;
	do_div(throughput, latency);
	throughput *= aggregate_size;
	gb_loopback_update_stats(&gb->throughput, throughput);
}

static void gb_loopback_calculate_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log throughput and requests using latency as benchmark */
	gb_loopback_throughput_update(gb, lat);
	gb_loopback_requests_update(gb, lat);

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gpbridge_firmware_latency,
				 gb->gpbridge_latency_ts);
}

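/*
 * Per-connection worker thread. It sleeps until a test type is selected via
 * sysfs (or the thread is stopped), then runs one operation of the selected
 * type per loop iteration, updates the statistics, and optionally sleeps
 * ms_wait milliseconds between messages. Setting type back to zero, or
 * reaching iteration_max, idles the thread again.
 */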
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int ms_wait = 0;
	int type;
	u32 size;
	struct gb_loopback *gb = data;

	while (1) {
		if (!gb->type)
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		sysfs_notify(&gb->connection->bundle->dev.kobj,
			     NULL, "iteration_count");

		/* Optionally terminate */
		if (gb->iteration_count == gb->iteration_max) {
			gb->type = 0;
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		ms_wait = gb->ms_wait;
		type = gb->type;
		mutex_unlock(&gb->mutex);

		/* Perform the requested operation */
		if (type == GB_LOOPBACK_TYPE_PING)
			error = gb_loopback_ping(gb);
		else if (type == GB_LOOPBACK_TYPE_TRANSFER)
			error = gb_loopback_transfer(gb, size);
		else if (type == GB_LOOPBACK_TYPE_SINK)
			error = gb_loopback_sink(gb, size);

		if (error)
			gb->error++;

		gb_loopback_calculate_stats(gb);
		gb->iteration_count++;

		if (ms_wait)
			msleep(ms_wait);
	}
	return 0;
}

static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}

static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
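
/*
 * Each open/read of the per-connection debugfs file (created in
 * gb_loopback_connection_init() as "raw_latency_<bundle>" under the
 * "gb_loopback" debugfs directory) drains one raw latency sample, in
 * microseconds, from kfifo_lat, or returns -EAGAIN when the fifo is empty.
 */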

static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}

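/*
 * Give every loopback connection its own bit (lbid) so that the "mask"
 * attribute can refer to individual connections. The list is re-sorted by
 * interface, bundle and cport id on each insertion, so bit positions follow
 * bus topology order rather than probe order.
 */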
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* Add the new entry and keep the list sorted by bus id */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}

#define DEBUGFS_NAMELEN 32

static int gb_loopback_connection_init(struct gb_connection *connection)
{
	struct gb_loopback *gb;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	init_waitqueue_head(&gb->wq);
	gb_loopback_reset_stats(gb);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_kzalloc;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);
	gb->connection = connection;
	connection->bundle->private = gb;
	retval = sysfs_create_groups(&connection->bundle->dev.kobj,
				     loopback_groups);
	if (retval)
		goto out_sysfs;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_sysfs_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);
	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_sysfs_conn:
	sysfs_remove_groups(&connection->bundle->dev.kobj, loopback_groups);
out_sysfs:
	debugfs_remove(gb->file);
	connection->bundle->private = NULL;
out_kzalloc:
	kfree(gb);

	return retval;
}

static void gb_loopback_connection_exit(struct gb_connection *connection)
{
	struct gb_loopback *gb = connection->bundle->private;
	unsigned long flags;

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	connection->bundle->private = NULL;
	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(connection);
	sysfs_remove_groups(&connection->bundle->dev.kobj,
			    loopback_groups);
	debugfs_remove(gb->file);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	kfree(gb);
}

static struct gb_protocol loopback_protocol = {
	.name			= "loopback",
	.id			= GREYBUS_PROTOCOL_LOOPBACK,
	.major			= GB_LOOPBACK_VERSION_MAJOR,
	.minor			= GB_LOOPBACK_VERSION_MINOR,
	.connection_init	= gb_loopback_connection_init,
	.connection_exit	= gb_loopback_connection_exit,
	.request_recv		= gb_loopback_request_recv,
};

static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = gb_protocol_register(&loopback_protocol);
	if (!retval)
		return retval;

	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	gb_protocol_deregister(&loopback_protocol);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");