/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <asm/unaligned.h>

#include "greybus.h"
#include "greybus_trace.h"
#include "kernel_ver.h"
#include "connection.h"
24 /* Default timeout for USB vendor requests. */
25 #define ES2_USB_CTRL_TIMEOUT 500
27 /* Default timeout for ARPC CPort requests */
28 #define ES2_ARPC_CPORT_TIMEOUT 500
30 /* Fixed CPort numbers */
31 #define ES2_CPORT_CDSI0 16
32 #define ES2_CPORT_CDSI1 17
34 /* Memory sizes for the buffers sent to/from the ES2 controller */
35 #define ES2_GBUF_MSG_SIZE_MAX 2048
37 /* Memory sizes for the ARPC buffers */
38 #define ARPC_OUT_SIZE_MAX U16_MAX
39 #define ARPC_IN_SIZE_MAX 128
41 static const struct usb_device_id id_table[] = {
42 { USB_DEVICE(0x18d1, 0x1eaf) },
45 MODULE_DEVICE_TABLE(usb, id_table);
47 #define APB1_LOG_SIZE SZ_16K
49 /* Number of bulk in and bulk out couple */
52 /* Expected number of bulk out endpoints */
53 #define NUM_BULKS_OUT NUM_BULKS
55 /* Expected number of bulk in endpoints (including ARPC endpoint) */
56 #define NUM_BULKS_IN (NUM_BULKS + 1)
59 * Number of CPort IN urbs in flight at any point in time.
60 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
63 #define NUM_CPORT_IN_URB 4
65 /* Number of CPort OUT urbs in flight at any point in time.
66 * Adjust if we get messages saying we are out of urbs in the system log.
68 #define NUM_CPORT_OUT_URB (8 * NUM_BULKS)
71 * Number of ARPC in urbs in flight at any point in time.
73 #define NUM_ARPC_IN_URB 2
76 * @endpoint: bulk in endpoint for CPort data
77 * @urb: array of urbs for the CPort in messages
78 * @buffer: array of buffers for the @cport_in_urb urbs
82 struct urb *urb[NUM_CPORT_IN_URB];
83 u8 *buffer[NUM_CPORT_IN_URB];
87 * es2_ap_dev - ES2 USB Bridge to AP structure
88 * @usb_dev: pointer to the USB device we are.
89 * @usb_intf: pointer to the USB interface we are bound to.
90 * @hd: pointer to our gb_host_device structure
92 * @cport_in: endpoint, urbs and buffer for cport in messages
93 * @cport_out_endpoint: endpoint for for cport out messages
94 * @cport_out_urb: array of urbs for the CPort out messages
95 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
97 * @cport_out_urb_cancelled: array of flags indicating whether the
98 * corresponding @cport_out_urb is being cancelled
99 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
101 * @apb_log_task: task pointer for logging thread
102 * @apb_log_dentry: file system entry for the log file interface
103 * @apb_log_enable_dentry: file system entry for enabling logging
104 * @apb_log_fifo: kernel FIFO to carry logged data
105 * @arpc_urb: array of urbs for the ARPC in messages
106 * @arpc_buffer: array of buffers for the @arpc_urb urbs
107 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
108 * @arpc_id_cycle: gives an unique id to ARPC
109 * @arpc_lock: locks ARPC list
110 * @arpcs: list of in progress ARPCs
113 struct usb_device *usb_dev;
114 struct usb_interface *usb_intf;
115 struct gb_host_device *hd;
117 struct es2_cport_in cport_in[NUM_BULKS];
118 __u8 cport_out_endpoint;
119 struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
120 bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
121 bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
122 spinlock_t cport_out_urb_lock;
126 struct task_struct *apb_log_task;
127 struct dentry *apb_log_dentry;
128 struct dentry *apb_log_enable_dentry;
129 DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
131 __u8 arpc_endpoint_in;
132 struct urb *arpc_urb[NUM_ARPC_IN_URB];
133 u8 *arpc_buffer[NUM_ARPC_IN_URB];
136 spinlock_t arpc_lock;
137 struct list_head arpcs;
141 * timesync_enable_request - Enable timesync in an APBridge
142 * @count: number of TimeSync Pulses to expect
143 * @frame_time: the initial FrameTime at the first TimeSync Pulse
144 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
145 * @refclk: The AP mandated reference clock to run FrameTime at
147 struct timesync_enable_request {
155 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
156 * @frame_time: An array of authoritative FrameTimes provided by the SVC
157 * and relayed to the APBridge by the AP
159 struct timesync_authoritative_request {
160 __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
164 struct list_head list;
165 struct arpc_request_message *req;
166 struct arpc_response_message *resp;
167 struct completion response_received;
171 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
173 return (struct es2_ap_dev *)&hd->hd_priv;
176 static void cport_out_callback(struct urb *urb);
177 static void usb_log_enable(struct es2_ap_dev *es2);
178 static void usb_log_disable(struct es2_ap_dev *es2);
179 static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
180 size_t size, int *result, unsigned int timeout);
182 static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
184 struct usb_device *udev = es2->usb_dev;
188 data = kmalloc(size, GFP_KERNEL);
191 memcpy(data, req, size);
193 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
195 USB_DIR_OUT | USB_TYPE_VENDOR |
197 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
199 dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
207 static void ap_urb_complete(struct urb *urb)
209 struct usb_ctrlrequest *dr = urb->context;
215 static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
217 struct usb_device *udev = es2->usb_dev;
219 struct usb_ctrlrequest *dr;
223 urb = usb_alloc_urb(0, GFP_ATOMIC);
227 dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
233 buf = (u8 *)dr + sizeof(*dr);
234 memcpy(buf, req, size);
237 dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
240 dr->wLength = cpu_to_le16(size);
242 usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
243 (unsigned char *)dr, buf, size,
244 ap_urb_complete, dr);
245 retval = usb_submit_urb(urb, GFP_ATOMIC);
253 static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
256 struct es2_ap_dev *es2 = hd_to_es2(hd);
259 return output_async(es2, req, size, cmd);
261 return output_sync(es2, req, size, cmd);
264 static int es2_cport_in_enable(struct es2_ap_dev *es2,
265 struct es2_cport_in *cport_in)
271 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
272 urb = cport_in->urb[i];
274 ret = usb_submit_urb(urb, GFP_KERNEL);
276 dev_err(&es2->usb_dev->dev,
277 "failed to submit in-urb: %d\n", ret);
285 for (--i; i >= 0; --i) {
286 urb = cport_in->urb[i];
293 static void es2_cport_in_disable(struct es2_ap_dev *es2,
294 struct es2_cport_in *cport_in)
299 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
300 urb = cport_in->urb[i];
305 static int es2_arpc_in_enable(struct es2_ap_dev *es2)
311 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
312 urb = es2->arpc_urb[i];
314 ret = usb_submit_urb(urb, GFP_KERNEL);
316 dev_err(&es2->usb_dev->dev,
317 "failed to submit arpc in-urb: %d\n", ret);
325 for (--i; i >= 0; --i) {
326 urb = es2->arpc_urb[i];
333 static void es2_arpc_in_disable(struct es2_ap_dev *es2)
338 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
339 urb = es2->arpc_urb[i];
344 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
346 struct urb *urb = NULL;
350 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
352 /* Look in our pool of allocated urbs first, as that's the "fastest" */
353 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
354 if (es2->cport_out_urb_busy[i] == false &&
355 es2->cport_out_urb_cancelled[i] == false) {
356 es2->cport_out_urb_busy[i] = true;
357 urb = es2->cport_out_urb[i];
361 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
366 * Crap, pool is empty, complain to the syslog and go allocate one
367 * dynamically as we have to succeed.
369 dev_dbg(&es2->usb_dev->dev,
370 "No free CPort OUT urbs, having to dynamically allocate one!\n");
371 return usb_alloc_urb(0, gfp_mask);
374 static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
379 * See if this was an urb in our pool, if so mark it "free", otherwise
380 * we need to free it ourselves.
382 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
383 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
384 if (urb == es2->cport_out_urb[i]) {
385 es2->cport_out_urb_busy[i] = false;
390 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
392 /* If urb is not NULL, then we need to free this urb */
397 * We (ab)use the operation-message header pad bytes to transfer the
398 * cport id in order to minimise overhead.
401 gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
403 header->pad[0] = cport_id;
406 /* Clear the pad bytes used for the CPort id */
407 static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
412 /* Extract the CPort id packed into the header, and clear it */
413 static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
415 u16 cport_id = header->pad[0];
417 gb_message_cport_clear(header);
423 * Returns zero if the message was successfully queued, or a negative errno
426 static int message_send(struct gb_host_device *hd, u16 cport_id,
427 struct gb_message *message, gfp_t gfp_mask)
429 struct es2_ap_dev *es2 = hd_to_es2(hd);
430 struct usb_device *udev = es2->usb_dev;
437 * The data actually transferred will include an indication
438 * of where the data should be sent. Do one last check of
439 * the target CPort id before filling it in.
441 if (!cport_id_valid(hd, cport_id)) {
442 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
446 /* Find a free urb */
447 urb = next_free_urb(es2, gfp_mask);
451 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
452 message->hcpriv = urb;
453 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
455 /* Pack the cport id into the message header */
456 gb_message_cport_pack(message->header, cport_id);
458 buffer_size = sizeof(*message->header) + message->payload_size;
460 usb_fill_bulk_urb(urb, udev,
461 usb_sndbulkpipe(udev,
462 es2->cport_out_endpoint),
463 message->buffer, buffer_size,
464 cport_out_callback, message);
465 urb->transfer_flags |= URB_ZERO_PACKET;
467 trace_gb_message_submit(message);
469 retval = usb_submit_urb(urb, gfp_mask);
471 dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
473 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
474 message->hcpriv = NULL;
475 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
478 gb_message_cport_clear(message->header);
487 * Can not be called in atomic context.
489 static void message_cancel(struct gb_message *message)
491 struct gb_host_device *hd = message->operation->connection->hd;
492 struct es2_ap_dev *es2 = hd_to_es2(hd);
498 spin_lock_irq(&es2->cport_out_urb_lock);
499 urb = message->hcpriv;
501 /* Prevent dynamically allocated urb from being deallocated. */
504 /* Prevent pre-allocated urb from being reused. */
505 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
506 if (urb == es2->cport_out_urb[i]) {
507 es2->cport_out_urb_cancelled[i] = true;
511 spin_unlock_irq(&es2->cport_out_urb_lock);
515 if (i < NUM_CPORT_OUT_URB) {
516 spin_lock_irq(&es2->cport_out_urb_lock);
517 es2->cport_out_urb_cancelled[i] = false;
518 spin_unlock_irq(&es2->cport_out_urb_lock);
524 static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
527 struct es2_ap_dev *es2 = hd_to_es2(hd);
528 struct ida *id_map = &hd->cport_id_map;
529 int ida_start, ida_end;
532 case ES2_CPORT_CDSI0:
533 case ES2_CPORT_CDSI1:
534 dev_err(&hd->dev, "cport %d not available\n", cport_id);
538 if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
539 flags & GB_CONNECTION_FLAG_CDSI1) {
540 if (es2->cdsi1_in_use) {
541 dev_err(&hd->dev, "CDSI1 already in use\n");
545 es2->cdsi1_in_use = true;
547 return ES2_CPORT_CDSI1;
552 ida_end = hd->num_cports;
553 } else if (cport_id < hd->num_cports) {
554 ida_start = cport_id;
555 ida_end = cport_id + 1;
557 dev_err(&hd->dev, "cport %d not available\n", cport_id);
561 return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
564 static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
566 struct es2_ap_dev *es2 = hd_to_es2(hd);
569 case ES2_CPORT_CDSI1:
570 es2->cdsi1_in_use = false;
574 ida_simple_remove(&hd->cport_id_map, cport_id);
577 static int cport_enable(struct gb_host_device *hd, u16 cport_id,
580 struct es2_ap_dev *es2 = hd_to_es2(hd);
581 struct usb_device *udev = es2->usb_dev;
582 struct gb_apb_request_cport_flags *req;
583 u32 connection_flags;
586 req = kzalloc(sizeof(*req), GFP_KERNEL);
590 connection_flags = 0;
591 if (flags & GB_CONNECTION_FLAG_CONTROL)
592 connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
593 if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
594 connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;
596 req->flags = cpu_to_le32(connection_flags);
598 dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
599 cport_id, connection_flags);
601 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
602 GB_APB_REQUEST_CPORT_FLAGS,
603 USB_DIR_OUT | USB_TYPE_VENDOR |
604 USB_RECIP_INTERFACE, cport_id, 0,
605 req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
606 if (ret != sizeof(*req)) {
607 dev_err(&udev->dev, "failed to set cport flags for port %d\n",
622 static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
624 struct es2_ap_dev *es2 = hd_to_es2(hd);
625 struct device *dev = &es2->usb_dev->dev;
626 struct arpc_cport_connected_req req;
629 req.cport_id = cpu_to_le16(cport_id);
630 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
631 NULL, ES2_ARPC_CPORT_TIMEOUT);
633 dev_err(dev, "failed to set connected state for cport %u: %d\n",
641 static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
643 struct es2_ap_dev *es2 = hd_to_es2(hd);
644 struct device *dev = &es2->usb_dev->dev;
645 struct arpc_cport_flush_req req;
648 req.cport_id = cpu_to_le16(cport_id);
649 ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
650 NULL, ES2_ARPC_CPORT_TIMEOUT);
652 dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
659 static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
660 u8 phase, unsigned int timeout)
662 struct es2_ap_dev *es2 = hd_to_es2(hd);
663 struct device *dev = &es2->usb_dev->dev;
664 struct arpc_cport_shutdown_req req;
668 if (timeout > U16_MAX)
671 req.cport_id = cpu_to_le16(cport_id);
672 req.timeout = cpu_to_le16(timeout);
674 ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
675 &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
677 dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
678 cport_id, ret, result);
685 static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
686 size_t peer_space, unsigned int timeout)
688 struct es2_ap_dev *es2 = hd_to_es2(hd);
689 struct device *dev = &es2->usb_dev->dev;
690 struct arpc_cport_quiesce_req req;
694 if (peer_space > U16_MAX)
697 if (timeout > U16_MAX)
700 req.cport_id = cpu_to_le16(cport_id);
701 req.peer_space = cpu_to_le16(peer_space);
702 req.timeout = cpu_to_le16(timeout);
703 ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
704 &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
706 dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
707 cport_id, ret, result);
714 static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
716 struct es2_ap_dev *es2 = hd_to_es2(hd);
717 struct device *dev = &es2->usb_dev->dev;
718 struct arpc_cport_clear_req req;
721 req.cport_id = cpu_to_le16(cport_id);
722 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
723 NULL, ES2_ARPC_CPORT_TIMEOUT);
725 dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
732 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
735 struct es2_ap_dev *es2 = hd_to_es2(hd);
736 struct usb_device *udev = es2->usb_dev;
738 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
739 GB_APB_REQUEST_LATENCY_TAG_EN,
740 USB_DIR_OUT | USB_TYPE_VENDOR |
741 USB_RECIP_INTERFACE, cport_id, 0, NULL,
742 0, ES2_USB_CTRL_TIMEOUT);
745 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
750 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
753 struct es2_ap_dev *es2 = hd_to_es2(hd);
754 struct usb_device *udev = es2->usb_dev;
756 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
757 GB_APB_REQUEST_LATENCY_TAG_DIS,
758 USB_DIR_OUT | USB_TYPE_VENDOR |
759 USB_RECIP_INTERFACE, cport_id, 0, NULL,
760 0, ES2_USB_CTRL_TIMEOUT);
763 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
768 static int timesync_enable(struct gb_host_device *hd, u8 count,
769 u64 frame_time, u32 strobe_delay, u32 refclk)
772 struct es2_ap_dev *es2 = hd_to_es2(hd);
773 struct usb_device *udev = es2->usb_dev;
774 struct gb_control_timesync_enable_request *request;
776 request = kzalloc(sizeof(*request), GFP_KERNEL);
780 request->count = count;
781 request->frame_time = cpu_to_le64(frame_time);
782 request->strobe_delay = cpu_to_le32(strobe_delay);
783 request->refclk = cpu_to_le32(refclk);
784 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
785 GB_APB_REQUEST_TIMESYNC_ENABLE,
786 USB_DIR_OUT | USB_TYPE_VENDOR |
787 USB_RECIP_INTERFACE, 0, 0, request,
788 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
790 dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
796 static int timesync_disable(struct gb_host_device *hd)
799 struct es2_ap_dev *es2 = hd_to_es2(hd);
800 struct usb_device *udev = es2->usb_dev;
802 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
803 GB_APB_REQUEST_TIMESYNC_DISABLE,
804 USB_DIR_OUT | USB_TYPE_VENDOR |
805 USB_RECIP_INTERFACE, 0, 0, NULL,
806 0, ES2_USB_CTRL_TIMEOUT);
808 dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
813 static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
816 struct es2_ap_dev *es2 = hd_to_es2(hd);
817 struct usb_device *udev = es2->usb_dev;
818 struct timesync_authoritative_request *request;
820 request = kzalloc(sizeof(*request), GFP_KERNEL);
824 for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
825 request->frame_time[i] = cpu_to_le64(frame_time[i]);
827 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
828 GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
829 USB_DIR_OUT | USB_TYPE_VENDOR |
830 USB_RECIP_INTERFACE, 0, 0, request,
831 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
833 dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
839 static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
842 struct es2_ap_dev *es2 = hd_to_es2(hd);
843 struct usb_device *udev = es2->usb_dev;
844 __le64 *response_frame_time;
846 response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
847 if (!response_frame_time)
850 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
851 GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
852 USB_DIR_IN | USB_TYPE_VENDOR |
853 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
854 sizeof(*response_frame_time),
855 ES2_USB_CTRL_TIMEOUT);
857 if (retval != sizeof(*response_frame_time)) {
858 dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
866 *frame_time = le64_to_cpu(*response_frame_time);
869 kfree(response_frame_time);
873 static struct gb_hd_driver es2_driver = {
874 .hd_priv_size = sizeof(struct es2_ap_dev),
875 .message_send = message_send,
876 .message_cancel = message_cancel,
877 .cport_allocate = es2_cport_allocate,
878 .cport_release = es2_cport_release,
879 .cport_enable = cport_enable,
880 .cport_connected = es2_cport_connected,
881 .cport_flush = es2_cport_flush,
882 .cport_shutdown = es2_cport_shutdown,
883 .cport_quiesce = es2_cport_quiesce,
884 .cport_clear = es2_cport_clear,
885 .latency_tag_enable = latency_tag_enable,
886 .latency_tag_disable = latency_tag_disable,
888 .timesync_enable = timesync_enable,
889 .timesync_disable = timesync_disable,
890 .timesync_authoritative = timesync_authoritative,
891 .timesync_get_last_event = timesync_get_last_event,
894 /* Common function to report consistent warnings based on URB status */
895 static int check_urb_status(struct urb *urb)
897 struct device *dev = &urb->dev->dev;
898 int status = urb->status;
905 dev_err(dev, "%s: overflow actual length is %d\n",
906 __func__, urb->actual_length);
912 /* device is gone, stop sending */
915 dev_err(dev, "%s: unknown status %d\n", __func__, status);
920 static void es2_destroy(struct es2_ap_dev *es2)
922 struct usb_device *udev;
926 debugfs_remove(es2->apb_log_enable_dentry);
927 usb_log_disable(es2);
929 /* Tear down everything! */
930 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
931 struct urb *urb = es2->cport_out_urb[i];
937 es2->cport_out_urb[i] = NULL;
938 es2->cport_out_urb_busy[i] = false; /* just to be anal */
941 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
942 struct urb *urb = es2->arpc_urb[i];
947 kfree(es2->arpc_buffer[i]);
948 es2->arpc_buffer[i] = NULL;
951 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
952 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
954 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
955 struct urb *urb = cport_in->urb[i];
960 kfree(cport_in->buffer[i]);
961 cport_in->buffer[i] = NULL;
965 /* release reserved CDSI0 and CDSI1 cports */
966 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
967 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
975 static void cport_in_callback(struct urb *urb)
977 struct gb_host_device *hd = urb->context;
978 struct device *dev = &urb->dev->dev;
979 struct gb_operation_msg_hdr *header;
980 int status = check_urb_status(urb);
985 if ((status == -EAGAIN) || (status == -EPROTO))
988 /* The urb is being unlinked */
989 if (status == -ENOENT || status == -ESHUTDOWN)
992 dev_err(dev, "urb cport in error %d (dropped)\n", status);
996 if (urb->actual_length < sizeof(*header)) {
997 dev_err(dev, "short message received\n");
1001 /* Extract the CPort id, which is packed in the message header */
1002 header = urb->transfer_buffer;
1003 cport_id = gb_message_cport_unpack(header);
1005 if (cport_id_valid(hd, cport_id)) {
1006 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
1007 urb->actual_length);
1009 dev_err(dev, "invalid cport id %u received\n", cport_id);
1012 /* put our urb back in the request pool */
1013 retval = usb_submit_urb(urb, GFP_ATOMIC);
1015 dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
1018 static void cport_out_callback(struct urb *urb)
1020 struct gb_message *message = urb->context;
1021 struct gb_host_device *hd = message->operation->connection->hd;
1022 struct es2_ap_dev *es2 = hd_to_es2(hd);
1023 int status = check_urb_status(urb);
1024 unsigned long flags;
1026 gb_message_cport_clear(message->header);
1028 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
1029 message->hcpriv = NULL;
1030 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
1033 * Tell the submitter that the message send (attempt) is
1034 * complete, and report the status.
1036 greybus_message_sent(hd, message, status);
1041 static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
1045 if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
1048 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
1052 INIT_LIST_HEAD(&rpc->list);
1053 rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
1057 rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
1061 rpc->req->type = type;
1062 rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
1063 memcpy(rpc->req->data, payload, size);
1065 init_completion(&rpc->response_received);
1077 static void arpc_free(struct arpc *rpc)
1084 static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
1088 list_for_each_entry(rpc, &es2->arpcs, list) {
1089 if (rpc->req->id == id)
1096 static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
1099 rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
1100 list_add_tail(&rpc->list, &es2->arpcs);
1103 static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
1106 rpc->active = false;
1107 list_del(&rpc->list);
1111 static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
1113 struct usb_device *udev = es2->usb_dev;
1116 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1117 GB_APB_REQUEST_ARPC_RUN,
1118 USB_DIR_OUT | USB_TYPE_VENDOR |
1119 USB_RECIP_INTERFACE,
1121 rpc->req, le16_to_cpu(rpc->req->size),
1122 ES2_USB_CTRL_TIMEOUT);
1123 if (retval != le16_to_cpu(rpc->req->size)) {
1125 "failed to send ARPC request %d: %d\n",
1126 rpc->req->type, retval);
1135 static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
1136 size_t size, int *result, unsigned int timeout)
1139 unsigned long flags;
1145 rpc = arpc_alloc(payload, size, type);
1149 spin_lock_irqsave(&es2->arpc_lock, flags);
1151 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1153 retval = arpc_send(es2, rpc, timeout);
1157 retval = wait_for_completion_interruptible_timeout(
1158 &rpc->response_received,
1159 msecs_to_jiffies(timeout));
1162 retval = -ETIMEDOUT;
1166 if (rpc->resp->result) {
1167 retval = -EREMOTEIO;
1169 *result = rpc->resp->result;
1175 spin_lock_irqsave(&es2->arpc_lock, flags);
1177 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1180 if (retval < 0 && retval != -EREMOTEIO) {
1181 dev_err(&es2->usb_dev->dev,
1182 "failed to execute ARPC: %d\n", retval);
1188 static void arpc_in_callback(struct urb *urb)
1190 struct es2_ap_dev *es2 = urb->context;
1191 struct device *dev = &urb->dev->dev;
1192 int status = check_urb_status(urb);
1194 struct arpc_response_message *resp;
1195 unsigned long flags;
1199 if ((status == -EAGAIN) || (status == -EPROTO))
1202 /* The urb is being unlinked */
1203 if (status == -ENOENT || status == -ESHUTDOWN)
1206 dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
1210 if (urb->actual_length < sizeof(*resp)) {
1211 dev_err(dev, "short aprc response received\n");
1215 resp = urb->transfer_buffer;
1216 spin_lock_irqsave(&es2->arpc_lock, flags);
1217 rpc = arpc_find(es2, resp->id);
1219 dev_err(dev, "invalid arpc response id received: %u\n",
1220 le16_to_cpu(resp->id));
1221 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1226 memcpy(rpc->resp, resp, sizeof(*resp));
1227 complete(&rpc->response_received);
1228 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1231 /* put our urb back in the request pool */
1232 retval = usb_submit_urb(urb, GFP_ATOMIC);
1234 dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
1237 #define APB1_LOG_MSG_SIZE 64
1238 static void apb_log_get(struct es2_ap_dev *es2, char *buf)
1243 retval = usb_control_msg(es2->usb_dev,
1244 usb_rcvctrlpipe(es2->usb_dev, 0),
1246 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1250 ES2_USB_CTRL_TIMEOUT);
1252 kfifo_in(&es2->apb_log_fifo, buf, retval);
1253 } while (retval > 0);
1256 static int apb_log_poll(void *data)
1258 struct es2_ap_dev *es2 = data;
1261 buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
1265 while (!kthread_should_stop()) {
1267 apb_log_get(es2, buf);
1275 static ssize_t apb_log_read(struct file *f, char __user *buf,
1276 size_t count, loff_t *ppos)
1278 struct es2_ap_dev *es2 = f->f_inode->i_private;
1283 if (count > APB1_LOG_SIZE)
1284 count = APB1_LOG_SIZE;
1286 tmp_buf = kmalloc(count, GFP_KERNEL);
1290 copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1291 ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1298 static const struct file_operations apb_log_fops = {
1299 .read = apb_log_read,
1302 static void usb_log_enable(struct es2_ap_dev *es2)
1304 if (!IS_ERR_OR_NULL(es2->apb_log_task))
1307 /* get log from APB1 */
1308 es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
1309 if (IS_ERR(es2->apb_log_task))
1311 /* XXX We will need to rename this per APB */
1312 es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
1313 gb_debugfs_get(), es2,
1317 static void usb_log_disable(struct es2_ap_dev *es2)
1319 if (IS_ERR_OR_NULL(es2->apb_log_task))
1322 debugfs_remove(es2->apb_log_dentry);
1323 es2->apb_log_dentry = NULL;
1325 kthread_stop(es2->apb_log_task);
1326 es2->apb_log_task = NULL;
1329 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1330 size_t count, loff_t *ppos)
1332 struct es2_ap_dev *es2 = f->f_inode->i_private;
1333 int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1336 sprintf(tmp_buf, "%d\n", enable);
1337 return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
1340 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1341 size_t count, loff_t *ppos)
1345 struct es2_ap_dev *es2 = f->f_inode->i_private;
1347 retval = kstrtoint_from_user(buf, count, 10, &enable);
1352 usb_log_enable(es2);
1354 usb_log_disable(es2);
1359 static const struct file_operations apb_log_enable_fops = {
1360 .read = apb_log_enable_read,
1361 .write = apb_log_enable_write,
1364 static int apb_get_cport_count(struct usb_device *udev)
1367 __le16 *cport_count;
1369 cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1373 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1374 GB_APB_REQUEST_CPORT_COUNT,
1375 USB_DIR_IN | USB_TYPE_VENDOR |
1376 USB_RECIP_INTERFACE, 0, 0, cport_count,
1377 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
1378 if (retval != sizeof(*cport_count)) {
1379 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1388 retval = le16_to_cpu(*cport_count);
1390 /* We need to fit a CPort ID in one byte of a message header */
1391 if (retval > U8_MAX) {
1393 dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
1402 * The ES2 USB Bridge device has 15 endpoints
1403 * 1 Control - usual USB stuff + AP -> APBridgeA messages
1404 * 7 Bulk IN - CPort data in
1405 * 7 Bulk OUT - CPort data out
1407 static int ap_probe(struct usb_interface *interface,
1408 const struct usb_device_id *id)
1410 struct es2_ap_dev *es2;
1411 struct gb_host_device *hd;
1412 struct usb_device *udev;
1413 struct usb_host_interface *iface_desc;
1414 struct usb_endpoint_descriptor *endpoint;
1419 bool bulk_out_found = false;
1421 udev = usb_get_dev(interface_to_usbdev(interface));
1423 num_cports = apb_get_cport_count(udev);
1424 if (num_cports < 0) {
1426 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1431 hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1438 es2 = hd_to_es2(hd);
1440 es2->usb_intf = interface;
1441 es2->usb_dev = udev;
1442 spin_lock_init(&es2->cport_out_urb_lock);
1443 INIT_KFIFO(es2->apb_log_fifo);
1444 usb_set_intfdata(interface, es2);
1447 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
1450 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1453 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
1457 /* find all bulk endpoints */
1458 iface_desc = interface->cur_altsetting;
1459 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1460 endpoint = &iface_desc->endpoint[i].desc;
1462 if (usb_endpoint_is_bulk_in(endpoint)) {
1463 if (bulk_in < NUM_BULKS)
1464 es2->cport_in[bulk_in].endpoint =
1465 endpoint->bEndpointAddress;
1467 es2->arpc_endpoint_in =
1468 endpoint->bEndpointAddress;
1470 } else if (usb_endpoint_is_bulk_out(endpoint) &&
1471 (!bulk_out_found)) {
1472 es2->cport_out_endpoint = endpoint->bEndpointAddress;
1473 bulk_out_found = true;
1476 "Unknown endpoint type found, address 0x%02x\n",
1477 endpoint->bEndpointAddress);
1480 if (bulk_in != NUM_BULKS_IN || !bulk_out_found) {
1481 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1486 /* Allocate buffers for our cport in messages */
1487 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1488 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
1490 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1494 urb = usb_alloc_urb(0, GFP_KERNEL);
1499 cport_in->urb[i] = urb;
1501 buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1507 usb_fill_bulk_urb(urb, udev,
1508 usb_rcvbulkpipe(udev,
1509 cport_in->endpoint),
1510 buffer, ES2_GBUF_MSG_SIZE_MAX,
1511 cport_in_callback, hd);
1513 cport_in->buffer[i] = buffer;
1517 /* Allocate buffers for ARPC in messages */
1518 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
1522 urb = usb_alloc_urb(0, GFP_KERNEL);
1527 es2->arpc_urb[i] = urb;
1529 buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
1535 usb_fill_bulk_urb(urb, udev,
1536 usb_rcvbulkpipe(udev,
1537 es2->arpc_endpoint_in),
1538 buffer, ARPC_IN_SIZE_MAX,
1539 arpc_in_callback, es2);
1541 es2->arpc_buffer[i] = buffer;
1544 /* Allocate urbs for our CPort OUT messages */
1545 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1548 urb = usb_alloc_urb(0, GFP_KERNEL);
1554 es2->cport_out_urb[i] = urb;
1555 es2->cport_out_urb_busy[i] = false; /* just to be anal */
1558 /* XXX We will need to rename this per APB */
1559 es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1560 (S_IWUSR | S_IRUGO),
1561 gb_debugfs_get(), es2,
1562 &apb_log_enable_fops);
1564 INIT_LIST_HEAD(&es2->arpcs);
1565 spin_lock_init(&es2->arpc_lock);
1567 if (es2_arpc_in_enable(es2))
1570 retval = gb_hd_add(hd);
1572 goto err_disable_arpc_in;
1574 for (i = 0; i < NUM_BULKS; ++i) {
1575 retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
1577 goto err_disable_cport_in;
1582 err_disable_cport_in:
1583 for (--i; i >= 0; --i)
1584 es2_cport_in_disable(es2, &es2->cport_in[i]);
1586 err_disable_arpc_in:
1587 es2_arpc_in_disable(es2);
1594 static void ap_disconnect(struct usb_interface *interface)
1596 struct es2_ap_dev *es2 = usb_get_intfdata(interface);
1601 for (i = 0; i < NUM_BULKS; ++i)
1602 es2_cport_in_disable(es2, &es2->cport_in[i]);
1603 es2_arpc_in_disable(es2);
1608 static struct usb_driver es2_ap_driver = {
1609 .name = "es2_ap_driver",
1611 .disconnect = ap_disconnect,
1612 .id_table = id_table,
1616 module_usb_driver(es2_ap_driver);
1618 MODULE_LICENSE("GPL v2");
1619 MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");