/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/kthread.h>
14 #include <linux/delay.h>
15 #include <linux/random.h>
16 #include <linux/sizes.h>
17 #include <linux/cdev.h>
19 #include <linux/kfifo.h>
20 #include <linux/debugfs.h>
21 #include <linux/list_sort.h>
22 #include <linux/spinlock.h>
24 #include <asm/div64.h>
27 #include "connection.h"
29 #define NSEC_PER_DAY 86400000000000ULL
/* One min/max/sum/count statistics bucket (members not visible in this excerpt). */
31 struct gb_loopback_stats {

/*
 * Module-wide state shared by all loopback connections.
 * NOTE(review): member list is truncated in this excerpt.
 */
38 struct gb_loopback_device {
43 /* We need to take a lock in atomic context */
45 struct list_head list;

/* Single driver-wide instance; guarded by its internal lock where needed. */
48 static struct gb_loopback_device gb_dev;
/* Per-connection loopback state (enclosing struct header not visible here). */
51 struct gb_connection *connection;

/* FIFO of per-iteration latency values and FIFO of raw start/end timestamps. */
54 struct kfifo kfifo_lat;
55 struct kfifo kfifo_ts;

/* Worker thread running the test loop, and linkage on gb_dev.list. */
57 struct task_struct *task;
58 struct list_head entry;

61 /* Per connection stats */
62 struct gb_loopback_stats latency;
63 struct gb_loopback_stats throughput;
64 struct gb_loopback_stats requests_per_second;
65 struct gb_loopback_stats apbridge_unipro_latency;
66 struct gb_loopback_stats gpbridge_firmware_latency;

/* Bridge-reported timestamps taken from the most recent transfer response. */
77 u32 apbridge_latency_ts;
78 u32 gpbridge_latency_ts;
81 #define GB_LOOPBACK_FIFO_DEFAULT 8192

/* Capacity (in entries) of the per-connection kfifos; read-only module param. */
83 static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
84 module_param(kfifo_depth, uint, 0444);

86 /* Maximum size of any one send data buffer we support */
87 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)

/* Upper bound on the user-settable wait between messages, in milliseconds. */
89 #define GB_LOOPBACK_MS_WAIT_MAX 1000
91 /* interface sysfs attributes */
/*
 * Generate a read-only sysfs show() for a u32 member of struct gb_loopback.
 * NOTE(review): some macro continuation lines are missing from this excerpt.
 */
92 #define gb_loopback_ro_attr(field) \
93 static ssize_t field##_show(struct device *dev, \
94 struct device_attribute *attr, \
97 struct gb_bundle *bundle; \
98 struct gb_loopback *gb; \
99 bundle = to_gb_bundle(dev); \
100 gb = bundle->private; \
101 return sprintf(buf, "%u\n", gb->field); \
103 static DEVICE_ATTR_RO(field)
/*
 * Generate a read-only sysfs show() for one field of a named stats bucket
 * (e.g. latency_min); "type" is the printf conversion used for the value.
 */
105 #define gb_loopback_ro_stats_attr(name, field, type) \
106 static ssize_t name##_##field##_show(struct device *dev, \
107 struct device_attribute *attr, \
110 struct gb_bundle *bundle; \
111 struct gb_loopback *gb; \
112 bundle = to_gb_bundle(dev); \
113 gb = bundle->private; \
114 return sprintf(buf, "%"#type"\n", gb->name.field); \
116 static DEVICE_ATTR_RO(name##_##field)
/*
 * Generate a read-only sysfs show() reporting a stats bucket's average as
 * "whole.micros".  Uses do_div() so the 64-bit sum divides safely on 32-bit,
 * and adds count/2 first to round to the closest integer.
 */
118 #define gb_loopback_ro_avg_attr(name) \
119 static ssize_t name##_avg_show(struct device *dev, \
120 struct device_attribute *attr, \
123 struct gb_loopback_stats *stats; \
124 struct gb_bundle *bundle; \
125 struct gb_loopback *gb; \
128 bundle = to_gb_bundle(dev); \
129 gb = bundle->private; \
131 count = stats->count ? stats->count : 1; \
132 avg = stats->sum + count / 2; /* round closest */ \
133 rem = do_div(avg, count); \
134 return sprintf(buf, "%llu.%06u\n", avg, 1000000 * rem / count); \
136 static DEVICE_ATTR_RO(name##_avg)
/* Convenience wrapper: emit min, max and avg attributes for one stats bucket. */
138 #define gb_loopback_stats_attrs(field) \
139 gb_loopback_ro_stats_attr(field, min, u); \
140 gb_loopback_ro_stats_attr(field, max, u); \
141 gb_loopback_ro_avg_attr(field)
/*
 * Generate a read-write sysfs attribute for a gb_loopback member.  The store
 * path parses under gb->mutex and re-validates via gb_loopback_check_attr().
 */
143 #define gb_loopback_attr(field, type) \
144 static ssize_t field##_show(struct device *dev, \
145 struct device_attribute *attr, \
148 struct gb_bundle *bundle = to_gb_bundle(dev); \
149 struct gb_loopback *gb = bundle->private; \
150 return sprintf(buf, "%"#type"\n", gb->field); \
152 static ssize_t field##_store(struct device *dev, \
153 struct device_attribute *attr, \
158 struct gb_bundle *bundle = to_gb_bundle(dev); \
159 struct gb_loopback *gb = bundle->private; \
160 mutex_lock(&gb->mutex); \
161 ret = sscanf(buf, "%"#type, &gb->field); \
165 gb_loopback_check_attr(gb, bundle); \
166 mutex_unlock(&gb->mutex); \
169 static DEVICE_ATTR_RW(field)
/*
 * Generate a read-only device-level attribute.  NOTE(review): the "conn"
 * parameter is not referenced by any line visible in this excerpt.
 */
171 #define gb_dev_loopback_ro_attr(field, conn) \
172 static ssize_t field##_show(struct device *dev, \
173 struct device_attribute *attr, \
176 struct gb_bundle *bundle = to_gb_bundle(dev); \
177 struct gb_loopback *gb = bundle->private; \
178 return sprintf(buf, "%u\n", gb->field); \
180 static DEVICE_ATTR_RO(field)
/*
 * Generate a read-write device-level attribute; like gb_loopback_attr(),
 * stores parse under gb->mutex and then run gb_loopback_check_attr().
 */
182 #define gb_dev_loopback_rw_attr(field, type) \
183 static ssize_t field##_show(struct device *dev, \
184 struct device_attribute *attr, \
187 struct gb_bundle *bundle = to_gb_bundle(dev); \
188 struct gb_loopback *gb = bundle->private; \
189 return sprintf(buf, "%"#type"\n", gb->field); \
191 static ssize_t field##_store(struct device *dev, \
192 struct device_attribute *attr, \
197 struct gb_bundle *bundle = to_gb_bundle(dev); \
198 struct gb_loopback *gb = bundle->private; \
199 mutex_lock(&gb->mutex); \
200 ret = sscanf(buf, "%"#type, &gb->field); \
204 gb_loopback_check_attr(gb, bundle); \
205 mutex_unlock(&gb->mutex); \
208 static DEVICE_ATTR_RW(field)
/* Forward declaration: needed by gb_loopback_check_attr() below. */
210 static void gb_loopback_reset_stats(struct gb_loopback *gb);

/*
 * Clamp and re-validate user-set attributes after a sysfs store.
 * Called with gb->mutex held (see the *_store macros above).
 */
211 static void gb_loopback_check_attr(struct gb_loopback *gb,
212 struct gb_bundle *bundle)
214 if (gb->ms_wait > GB_LOOPBACK_MS_WAIT_MAX)
215 gb->ms_wait = GB_LOOPBACK_MS_WAIT_MAX;
216 if (gb->size > gb_dev.size_max)
217 gb->size = gb_dev.size_max;
218 gb->iteration_count = 0;

/* Warn when the kfifo cannot hold one sample for every requested iteration. */
221 if (kfifo_depth < gb->iteration_max) {
222 dev_warn(&bundle->dev,
223 "cannot log bytes %u kfifo_depth %u\n",
224 gb->iteration_max, kfifo_depth);

/* Discard stale samples from any previous run. */
226 kfifo_reset_out(&gb->kfifo_lat);
227 kfifo_reset_out(&gb->kfifo_ts);

/* Only traffic-generating test types reset the statistics. */
230 case GB_LOOPBACK_TYPE_PING:
231 case GB_LOOPBACK_TYPE_TRANSFER:
232 case GB_LOOPBACK_TYPE_SINK:
233 gb_loopback_reset_stats(gb);
242 /* Time to send and receive one message */
243 gb_loopback_stats_attrs(latency);
244 /* Number of requests sent per second on this cport */
245 gb_loopback_stats_attrs(requests_per_second);
246 /* Quantity of data sent and received on this cport */
247 gb_loopback_stats_attrs(throughput);
248 /* Latency across the UniPro link from APBridge's perspective */
249 gb_loopback_stats_attrs(apbridge_unipro_latency);
250 /* Firmware induced overhead in the GPBridge */
251 gb_loopback_stats_attrs(gpbridge_firmware_latency);

253 /* Number of errors encountered during loop */
254 gb_loopback_ro_attr(error);

257 * Type of loopback message to send based on protocol type definitions
258 * 0 => Don't send message
259 * 2 => Send ping message continuously (message without payload)
260 * 3 => Send transfer message continuously (message with payload,
261 * payload returned in response)
262 * 4 => Send a sink message (message with payload, no payload in response)
264 gb_dev_loopback_rw_attr(type, d);
265 /* Size of transfer message payload: 0-4096 bytes */
266 gb_dev_loopback_rw_attr(size, u);
267 /* Time to wait between two messages: 0-1000 ms */
268 gb_dev_loopback_rw_attr(ms_wait, d);
269 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
270 gb_dev_loopback_rw_attr(iteration_max, u);
271 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
272 gb_dev_loopback_ro_attr(iteration_count, false);
273 /* A bit-mask of destination connections to include in the test run */
274 gb_dev_loopback_rw_attr(mask, u);
/*
 * sysfs attribute table registered per bundle.
 * NOTE(review): the customary NULL terminator and a few entries (type, size,
 * mask) are not visible in this excerpt — confirm against the full file.
 */
276 static struct attribute *loopback_attrs[] = {
277 &dev_attr_latency_min.attr,
278 &dev_attr_latency_max.attr,
279 &dev_attr_latency_avg.attr,
280 &dev_attr_requests_per_second_min.attr,
281 &dev_attr_requests_per_second_max.attr,
282 &dev_attr_requests_per_second_avg.attr,
283 &dev_attr_throughput_min.attr,
284 &dev_attr_throughput_max.attr,
285 &dev_attr_throughput_avg.attr,
286 &dev_attr_apbridge_unipro_latency_min.attr,
287 &dev_attr_apbridge_unipro_latency_max.attr,
288 &dev_attr_apbridge_unipro_latency_avg.attr,
289 &dev_attr_gpbridge_firmware_latency_min.attr,
290 &dev_attr_gpbridge_firmware_latency_max.attr,
291 &dev_attr_gpbridge_firmware_latency_avg.attr,
294 &dev_attr_ms_wait.attr,
295 &dev_attr_iteration_count.attr,
296 &dev_attr_iteration_max.attr,
298 &dev_attr_error.attr,

/* Expose the table as the "loopback" attribute group. */
301 ATTRIBUTE_GROUPS(loopback);
/* Convert elapsed nanoseconds to microseconds (do_div for 32-bit safety). */
303 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
307 do_div(elapsed_nsecs, NSEC_PER_USEC);

/*
 * Nanosecond delta between two timestamps; the visible return handles the
 * case where t2 wrapped past midnight (hence the NSEC_PER_DAY correction).
 */
312 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
317 return NSEC_PER_DAY - t2 + t1;

/* Elapsed nanoseconds between two struct timeval samples. */
320 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
324 t1 = timeval_to_ns(ts);
325 t2 = timeval_to_ns(te);
327 return __gb_loopback_calc_latency(t1, t2);
/* Log the raw start/end timestamps of one operation into the timestamp FIFO. */
330 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
331 struct timeval *ts, struct timeval *te)
333 kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
334 kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
/*
 * Send one synchronous loopback operation and record its round-trip time.
 * Timestamps bracket create+send+response; the delta lands in
 * gb->elapsed_nsecs and the raw timevals in the timestamp FIFO.
 */
337 static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
338 void *request, int request_size,
339 void *response, int response_size)
341 struct gb_operation *operation;
342 struct timeval ts, te;

/* Start-of-operation timestamp. */
345 do_gettimeofday(&ts);
346 operation = gb_operation_create(gb->connection, type, request_size,
347 response_size, GFP_KERNEL);

354 memcpy(operation->request->payload, request, request_size);

356 ret = gb_operation_request_send_sync(operation);
358 dev_err(&gb->connection->bundle->dev,
359 "synchronous operation failed: %d\n", ret);

/* Copy the response out only when its size matches what the caller expects. */
361 if (response_size == operation->response->payload_size) {
362 memcpy(response, operation->response->payload,
365 dev_err(&gb->connection->bundle->dev,
366 "response size %zu expected %d\n",
367 operation->response->payload_size,

372 gb_operation_put(operation);

/* End-of-operation timestamp. */
375 do_gettimeofday(&te);

377 /* Calculate the total time the message took */
378 gb_loopback_push_latency_ts(gb, &ts, &te);
379 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
/*
 * Send a sink message: len payload bytes out, no payload expected back.
 * Request buffer is heap-allocated (header + len bytes).
 */
384 static int gb_loopback_sink(struct gb_loopback *gb, u32 len)
386 struct gb_loopback_transfer_request *request;

389 request = kmalloc(len + sizeof(*request), GFP_KERNEL);

393 request->len = cpu_to_le32(len);
394 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
395 request, len + sizeof(*request),
401 static int gb_loopback_transfer(struct gb_loopback *gb, u32 len)
403 struct gb_loopback_transfer_request *request;
404 struct gb_loopback_transfer_response *response;
407 gb->apbridge_latency_ts = 0;
408 gb->gpbridge_latency_ts = 0;
410 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
413 response = kmalloc(len + sizeof(*response), GFP_KERNEL);
419 memset(request->data, 0x5A, len);
421 request->len = cpu_to_le32(len);
422 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
423 request, len + sizeof(*request),
424 response, len + sizeof(*response));
428 if (memcmp(request->data, response->data, len)) {
429 dev_err(&gb->connection->bundle->dev,
430 "Loopback Data doesn't match\n");
433 gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
434 gb->gpbridge_latency_ts = (u32)__le32_to_cpu(response->reserved1);
/* Send a ping: a loopback operation with no request or response payload. */
443 static int gb_loopback_ping(struct gb_loopback *gb)
445 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
/*
 * Handle an incoming (module-initiated) loopback request.  Pings and sinks
 * need no response payload; transfers echo the request payload back after
 * size validation.
 */
449 static int gb_loopback_request_recv(u8 type, struct gb_operation *operation)
451 struct gb_connection *connection = operation->connection;
452 struct gb_loopback_transfer_request *request;
453 struct gb_loopback_transfer_response *response;
454 struct device *dev = &connection->bundle->dev;

457 /* By convention, the AP initiates the version operation */
459 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
460 dev_err(dev, "module-initiated version operation\n");
462 case GB_LOOPBACK_TYPE_PING:
463 case GB_LOOPBACK_TYPE_SINK:
465 case GB_LOOPBACK_TYPE_TRANSFER:

/* Reject requests too small to carry the transfer header. */
466 if (operation->request->payload_size < sizeof(*request)) {
467 dev_err(dev, "transfer request too small (%zu < %zu)\n",
468 operation->request->payload_size,
470 return -EINVAL; /* -EMSGSIZE */
472 request = operation->request->payload;
473 len = le32_to_cpu(request->len);

/* Bound the echo length by the negotiated maximum payload. */
474 if (len > gb_dev.size_max) {
475 dev_err(dev, "transfer request too large (%zu > %zu)\n",
476 len, gb_dev.size_max);

480 if (!gb_operation_response_alloc(operation,
481 len + sizeof(*response), GFP_KERNEL)) {
482 dev_err(dev, "error allocating response\n");
485 response = operation->response->payload;
486 response->len = cpu_to_le32(len);

/* Echo the payload back to the sender. */
488 memcpy(response->data, request->data, len);

492 dev_err(dev, "unsupported request: %u\n", type);
/*
 * Reset all statistics buckets to their initial (empty) state.
 * NOTE(review): the two runs of memcpy()s below copy the same reset template
 * into the same five gb->* buckets twice — the second "aggregate" run appears
 * to be leftover duplication; confirm against the full file before removing.
 */
497 static void gb_loopback_reset_stats(struct gb_loopback *gb)
499 struct gb_loopback_stats reset = {

503 /* Reset per-connection stats */
504 memcpy(&gb->latency, &reset,
505 sizeof(struct gb_loopback_stats));
506 memcpy(&gb->throughput, &reset,
507 sizeof(struct gb_loopback_stats));
508 memcpy(&gb->requests_per_second, &reset,
509 sizeof(struct gb_loopback_stats));
510 memcpy(&gb->apbridge_unipro_latency, &reset,
511 sizeof(struct gb_loopback_stats));
512 memcpy(&gb->gpbridge_firmware_latency, &reset,
513 sizeof(struct gb_loopback_stats));

515 /* Reset aggregate stats */
516 memcpy(&gb->latency, &reset, sizeof(struct gb_loopback_stats));
517 memcpy(&gb->throughput, &reset, sizeof(struct gb_loopback_stats));
518 memcpy(&gb->requests_per_second, &reset,
519 sizeof(struct gb_loopback_stats));
520 memcpy(&gb->apbridge_unipro_latency, &reset,
521 sizeof(struct gb_loopback_stats));
522 memcpy(&gb->gpbridge_firmware_latency, &reset,
523 sizeof(struct gb_loopback_stats));
/* Fold one sample into a stats bucket (min/max tracking visible here). */
526 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
528 if (stats->min > val)
530 if (stats->max < val)

/* Derive requests/second from the latency (in usec) of one round trip. */
536 static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
538 u32 req = USEC_PER_SEC;
540 do_div(req, latency);
541 gb_loopback_update_stats(&gb->requests_per_second, req);

/*
 * Derive bytes/second from the latency of one round trip.  The aggregate
 * size counts both message headers plus type-dependent payload sizes.
 */
544 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
547 u32 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

550 case GB_LOOPBACK_TYPE_PING:
552 case GB_LOOPBACK_TYPE_SINK:
553 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
556 case GB_LOOPBACK_TYPE_TRANSFER:
557 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
558 sizeof(struct gb_loopback_transfer_response) +

565 /* Calculate bytes per second */
566 throughput = USEC_PER_SEC;
567 do_div(throughput, latency);
568 throughput *= aggregate_size;
569 gb_loopback_update_stats(&gb->throughput, throughput);
/* Update every statistics bucket after one completed loopback operation. */
572 static void gb_loopback_calculate_stats(struct gb_loopback *gb)

576 /* Express latency in terms of microseconds */
577 lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

579 /* Log latency statistic */
580 gb_loopback_update_stats(&gb->latency, lat);

582 /* Raw latency log on a per thread basis */
583 kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

585 /* Log throughput and requests using latency as benchmark */
586 gb_loopback_throughput_update(gb, lat);
587 gb_loopback_requests_update(gb, lat);

589 /* Log the firmware supplied latency values */
590 gb_loopback_update_stats(&gb->apbridge_unipro_latency,
591 gb->apbridge_latency_ts);
592 gb_loopback_update_stats(&gb->gpbridge_firmware_latency,
593 gb->gpbridge_latency_ts);
/*
 * Worker thread main loop: sleep until a test type is set (or stop is
 * requested), snapshot the run parameters under gb->mutex, perform one
 * operation of the selected type, then update statistics.
 */
596 static int gb_loopback_fn(void *data)
602 struct gb_loopback *gb = data;

606 wait_event_interruptible(gb->wq, gb->type ||
607 kthread_should_stop());
608 if (kthread_should_stop())

611 mutex_lock(&gb->mutex);

/* Let userspace poll iteration progress via sysfs. */
613 sysfs_notify(&gb->connection->bundle->dev.kobj,
614 NULL, "iteration_count");

616 /* Optionally terminate */
617 if (gb->iteration_count == gb->iteration_max) {
619 mutex_unlock(&gb->mutex);

/* Snapshot parameters before dropping the lock. */
623 ms_wait = gb->ms_wait;
625 mutex_unlock(&gb->mutex);

627 /* Else operations to perform */
628 if (type == GB_LOOPBACK_TYPE_PING)
629 error = gb_loopback_ping(gb);
630 else if (type == GB_LOOPBACK_TYPE_TRANSFER)
631 error = gb_loopback_transfer(gb, size);
632 else if (type == GB_LOOPBACK_TYPE_SINK)
633 error = gb_loopback_sink(gb, size);

638 gb_loopback_calculate_stats(gb);
639 gb->iteration_count++;
/* Drain a latency kfifo into a seq_file, one u32 sample at a time. */
647 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
654 if (kfifo_len(kfifo) == 0) {
660 retval = kfifo_out(kfifo, &latency, sizeof(latency));
662 seq_printf(s, "%u", latency);

/* seq_file show() hook for the per-connection raw latency file. */
670 static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
672 struct gb_loopback *gb = s->private;
674 return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,

/* debugfs open() hook: bind the show function via single_open(). */
678 static int gb_loopback_latency_open(struct inode *inode, struct file *file)
680 return single_open(file, gb_loopback_dbgfs_latency_show,

/* File operations for the raw_latency_* debugfs entries. */
684 static const struct file_operations gb_loopback_debugfs_latency_ops = {
685 .open = gb_loopback_latency_open,
688 .release = single_release,
/*
 * list_sort() comparator: orders loopback entries by interface id, then
 * bundle id, then cport id — i.e. stable bus position ordering.
 */
691 static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
692 struct list_head *lhb)
694 struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
695 struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
696 struct gb_connection *ca = a->connection;
697 struct gb_connection *cb = b->connection;

699 if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
701 if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
703 if (ca->bundle->id < cb->bundle->id)
705 if (cb->bundle->id < ca->bundle->id)
707 if (ca->intf_cport_id < cb->intf_cport_id)
709 else if (cb->intf_cport_id < ca->intf_cport_id)

/*
 * Add a new connection to the device list in sorted position and reassign
 * each entry a one-hot lbid mask.  NOTE(review): the increment of new_lbid
 * is not visible in this excerpt — presumably it advances per entry.
 */
715 static void gb_loopback_insert_id(struct gb_loopback *gb)
717 struct gb_loopback *gb_list;

720 /* perform an insertion sort */
721 list_add_tail(&gb->entry, &gb_dev.list);
722 list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
723 list_for_each_entry(gb_list, &gb_dev.list, entry) {
724 gb_list->lbid = 1 << new_lbid;
/* Buffer size for the per-connection debugfs file name. */
729 #define DEBUGFS_NAMELEN 32

/*
 * Per-connection setup: allocate state, compute the maximum payload, create
 * sysfs/debugfs entries, allocate the logging kfifos, start the worker
 * thread, then register the connection on the sorted device list.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
731 static int gb_loopback_connection_init(struct gb_connection *connection)
733 struct gb_loopback *gb;
735 char name[DEBUGFS_NAMELEN];

738 gb = kzalloc(sizeof(*gb), GFP_KERNEL);

742 init_waitqueue_head(&gb->wq);
743 gb_loopback_reset_stats(gb);

746 /* Calculate maximum payload */
747 gb_dev.size_max = gb_operation_get_payload_size_max(connection);
748 if (gb_dev.size_max <=
749 sizeof(struct gb_loopback_transfer_request)) {
753 gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);

756 /* Create per-connection sysfs and debugfs data-points */
757 snprintf(name, sizeof(name), "raw_latency_%s",
758 dev_name(&connection->bundle->dev));
759 gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
760 &gb_loopback_debugfs_latency_ops);
761 gb->connection = connection;
762 connection->bundle->private = gb;
763 retval = sysfs_create_groups(&connection->bundle->dev.kobj,

/* kfifo_lat holds u32 latency samples; kfifo_ts holds timeval pairs. */
769 if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
774 if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,

780 /* Fork worker thread */
781 mutex_init(&gb->mutex);
782 gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
783 if (IS_ERR(gb->task)) {
784 retval = PTR_ERR(gb->task);

/* Insert into the sorted device list under the atomic-context lock. */
788 spin_lock_irqsave(&gb_dev.lock, flags);
789 gb_loopback_insert_id(gb);
791 spin_unlock_irqrestore(&gb_dev.lock, flags);

793 gb_connection_latency_tag_enable(connection);

/* Error unwinding: free resources in reverse order of acquisition. */
797 kfifo_free(&gb->kfifo_ts);
799 kfifo_free(&gb->kfifo_lat);
801 sysfs_remove_groups(&connection->bundle->dev.kobj, loopback_groups);
803 debugfs_remove(gb->file);
804 connection->bundle->private = NULL;
/*
 * Per-connection teardown: stop the worker thread, free FIFOs, remove
 * sysfs/debugfs entries and unlink from the device list.
 */
811 static void gb_loopback_connection_exit(struct gb_connection *connection)
813 struct gb_loopback *gb = connection->bundle->private;

/* The task pointer may hold an error value if kthread_run() failed. */
816 if (!IS_ERR_OR_NULL(gb->task))
817 kthread_stop(gb->task);

819 connection->bundle->private = NULL;
820 kfifo_free(&gb->kfifo_lat);
821 kfifo_free(&gb->kfifo_ts);
822 gb_connection_latency_tag_disable(connection);
823 sysfs_remove_groups(&connection->bundle->dev.kobj,
825 debugfs_remove(gb->file);

/* Unlink from the device list under the atomic-context lock. */
827 spin_lock_irqsave(&gb_dev.lock, flags);
829 list_del(&gb->entry);
830 spin_unlock_irqrestore(&gb_dev.lock, flags);
/* Greybus protocol registration: hooks for connect/disconnect and requests. */
835 static struct gb_protocol loopback_protocol = {
837 .id = GREYBUS_PROTOCOL_LOOPBACK,
838 .major = GB_LOOPBACK_VERSION_MAJOR,
839 .minor = GB_LOOPBACK_VERSION_MINOR,
840 .connection_init = gb_loopback_connection_init,
841 .connection_exit = gb_loopback_connection_exit,
842 .request_recv = gb_loopback_request_recv,

/*
 * Module init: set up the shared device list/lock and debugfs root, then
 * register the protocol; debugfs is removed again if registration fails.
 */
845 static int loopback_init(void)

849 INIT_LIST_HEAD(&gb_dev.list);
850 spin_lock_init(&gb_dev.lock);
851 gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

853 retval = gb_protocol_register(&loopback_protocol);

857 debugfs_remove_recursive(gb_dev.root);
860 module_init(loopback_init);

/* Module exit: tear down debugfs and deregister the protocol. */
862 static void __exit loopback_exit(void)
864 debugfs_remove_recursive(gb_dev.root);
865 gb_protocol_deregister(&loopback_protocol);
867 module_exit(loopback_exit);

869 MODULE_LICENSE("GPL v2");