/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9 #include <linux/kthread.h>
10 #include <linux/sizes.h>
11 #include <linux/usb.h>
12 #include <linux/kfifo.h>
13 #include <linux/debugfs.h>
14 #include <linux/list.h>
15 #include <asm/unaligned.h>
19 #include "greybus_trace.h"
20 #include "kernel_ver.h"
21 #include "connection.h"
24 /* Default timeout for USB vendor requests. */
25 #define ES2_USB_CTRL_TIMEOUT 500
27 /* Default timeout for ARPC CPort requests */
28 #define ES2_ARPC_CPORT_TIMEOUT 500
30 /* Fixed CPort numbers */
31 #define ES2_CPORT_CDSI0 16
32 #define ES2_CPORT_CDSI1 17
34 /* Memory sizes for the buffers sent to/from the ES2 controller */
35 #define ES2_GBUF_MSG_SIZE_MAX 2048
37 /* Memory sizes for the ARPC buffers */
38 #define ARPC_OUT_SIZE_MAX U16_MAX
39 #define ARPC_IN_SIZE_MAX 128
41 static const struct usb_device_id id_table[] = {
42 { USB_DEVICE(0x18d1, 0x1eaf) },
45 MODULE_DEVICE_TABLE(usb, id_table);
47 #define APB1_LOG_SIZE SZ_16K
49 /* Number of bulk in and bulk out couple */
52 /* Expected number of bulk out endpoints */
53 #define NUM_BULKS_OUT NUM_BULKS
55 /* Expected number of bulk in endpoints (including ARPC endpoint) */
56 #define NUM_BULKS_IN (NUM_BULKS + 1)
59 * Number of CPort IN urbs in flight at any point in time.
60 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
63 #define NUM_CPORT_IN_URB 4
65 /* Number of CPort OUT urbs in flight at any point in time.
66 * Adjust if we get messages saying we are out of urbs in the system log.
68 #define NUM_CPORT_OUT_URB (8 * NUM_BULKS)
71 * Number of ARPC in urbs in flight at any point in time.
73 #define NUM_ARPC_IN_URB 2
76 * @endpoint: bulk in endpoint for CPort data
77 * @urb: array of urbs for the CPort in messages
78 * @buffer: array of buffers for the @cport_in_urb urbs
82 struct urb *urb[NUM_CPORT_IN_URB];
83 u8 *buffer[NUM_CPORT_IN_URB];
87 * @endpoint: bulk out endpoint for CPort data
89 struct es2_cport_out {
94 * es2_ap_dev - ES2 USB Bridge to AP structure
95 * @usb_dev: pointer to the USB device we are.
96 * @usb_intf: pointer to the USB interface we are bound to.
97 * @hd: pointer to our gb_host_device structure
99 * @cport_in: endpoint, urbs and buffer for cport in messages
100 * @cport_out: endpoint for for cport out messages
101 * @cport_out_urb: array of urbs for the CPort out messages
102 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
104 * @cport_out_urb_cancelled: array of flags indicating whether the
105 * corresponding @cport_out_urb is being cancelled
106 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
108 * @apb_log_task: task pointer for logging thread
109 * @apb_log_dentry: file system entry for the log file interface
110 * @apb_log_enable_dentry: file system entry for enabling logging
111 * @apb_log_fifo: kernel FIFO to carry logged data
112 * @arpc_urb: array of urbs for the ARPC in messages
113 * @arpc_buffer: array of buffers for the @arpc_urb urbs
114 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
115 * @arpc_id_cycle: gives an unique id to ARPC
116 * @arpc_lock: locks ARPC list
117 * @arpcs: list of in progress ARPCs
120 struct usb_device *usb_dev;
121 struct usb_interface *usb_intf;
122 struct gb_host_device *hd;
124 struct es2_cport_in cport_in[NUM_BULKS];
125 struct es2_cport_out cport_out[NUM_BULKS];
126 struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
127 bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
128 bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
129 spinlock_t cport_out_urb_lock;
135 struct task_struct *apb_log_task;
136 struct dentry *apb_log_dentry;
137 struct dentry *apb_log_enable_dentry;
138 DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
140 __u8 arpc_endpoint_in;
141 struct urb *arpc_urb[NUM_ARPC_IN_URB];
142 u8 *arpc_buffer[NUM_ARPC_IN_URB];
145 spinlock_t arpc_lock;
146 struct list_head arpcs;
/*
 * NOTE(review): this region is a truncated extraction — the struct bodies
 * for cport_to_ep and timesync_enable_request (and the closing braces) are
 * missing from this view; only the kernel-doc fragments and a few field
 * lines survive. Comments below annotate what is visible.
 */
150 * cport_to_ep - information about cport to endpoints mapping
151 * @cport_id: the id of cport to map to endpoints
152 * @endpoint_in: the endpoint number to use for in transfer
153 * @endpoint_out: he endpoint number to use for out transfer
162 * timesync_enable_request - Enable timesync in an APBridge
163 * @count: number of TimeSync Pulses to expect
164 * @frame_time: the initial FrameTime at the first TimeSync Pulse
165 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
166 * @refclk: The AP mandated reference clock to run FrameTime at
168 struct timesync_enable_request {
176 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
177 * @frame_time: An array of authoritative FrameTimes provided by the SVC
178 * and relayed to the APBridge by the AP
180 struct timesync_authoritative_request {
/* One FrameTime per strobe; sent to the bridge in little-endian order. */
181 __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
185 struct list_head list;
186 struct arpc_request_message *req;
187 struct arpc_response_message *resp;
188 struct completion response_received;
192 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
194 return (struct es2_ap_dev *)&hd->hd_priv;
197 static void cport_out_callback(struct urb *urb);
198 static void usb_log_enable(struct es2_ap_dev *es2);
199 static void usb_log_disable(struct es2_ap_dev *es2);
200 static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
201 size_t size, int *result, unsigned int timeout);
203 /* Get the endpoints pair mapped to the cport */
204 static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
206 if (cport_id >= es2->hd->num_cports)
208 return es2->cport_to_ep[cport_id];
/*
 * NOTE(review): the "Disable for now" comment suggests this mapping code
 * (ep_pair_in_use / map_cport_to_ep / unmap_cport) sits inside an
 * `#if 0` block whose guards were lost in extraction — TODO confirm
 * against the original file. Many lines (declarations, returns, braces)
 * are also missing from this view, so the code is annotated as-is rather
 * than rewritten.
 */
211 /* Disable for now until we work all of this out to keep a warning-free build */
213 /* Test if the endpoints pair is already mapped to a cport */
214 static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
218 for (i = 0; i < es2->hd->num_cports; i++) {
219 if (es2->cport_to_ep[i] == ep_pair)
225 /* Configure the endpoint mapping and send the request to APBridge */
226 static int map_cport_to_ep(struct es2_ap_dev *es2,
227 u16 cport_id, int ep_pair)
230 struct cport_to_ep *cport_to_ep;
232 if (ep_pair < 0 || ep_pair >= NUM_BULKS)
234 if (cport_id >= es2->hd->num_cports)
236 if (ep_pair && ep_pair_in_use(es2, ep_pair))
239 cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL)
243 es2->cport_to_ep[cport_id] = ep_pair;
244 cport_to_ep->cport_id = cpu_to_le16(cport_id);
245 cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
246 cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
248 retval = usb_control_msg(es2->usb_dev,
249 usb_sndctrlpipe(es2->usb_dev, 0),
250 GB_APB_REQUEST_EP_MAPPING,
251 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
254 sizeof(*cport_to_ep),
255 ES2_USB_CTRL_TIMEOUT);
/* A short control transfer is treated as failure by the caller. */
256 if (retval == sizeof(*cport_to_ep))
263 /* Unmap a cport: use the muxed endpoints pair */
264 static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
266 return map_cport_to_ep(es2, cport_id, 0);
270 static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
272 struct usb_device *udev = es2->usb_dev;
276 data = kmalloc(size, GFP_KERNEL);
279 memcpy(data, req, size);
281 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
283 USB_DIR_OUT | USB_TYPE_VENDOR |
285 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
287 dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
295 static void ap_urb_complete(struct urb *urb)
297 struct usb_ctrlrequest *dr = urb->context;
303 static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
305 struct usb_device *udev = es2->usb_dev;
307 struct usb_ctrlrequest *dr;
311 urb = usb_alloc_urb(0, GFP_ATOMIC);
315 dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
321 buf = (u8 *)dr + sizeof(*dr);
322 memcpy(buf, req, size);
325 dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
328 dr->wLength = cpu_to_le16(size);
330 usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
331 (unsigned char *)dr, buf, size,
332 ap_urb_complete, dr);
333 retval = usb_submit_urb(urb, GFP_ATOMIC);
341 static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
344 struct es2_ap_dev *es2 = hd_to_es2(hd);
347 return output_async(es2, req, size, cmd);
349 return output_sync(es2, req, size, cmd);
352 static int es2_cport_in_enable(struct es2_ap_dev *es2,
353 struct es2_cport_in *cport_in)
359 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
360 urb = cport_in->urb[i];
362 ret = usb_submit_urb(urb, GFP_KERNEL);
364 dev_err(&es2->usb_dev->dev,
365 "failed to submit in-urb: %d\n", ret);
373 for (--i; i >= 0; --i) {
374 urb = cport_in->urb[i];
381 static void es2_cport_in_disable(struct es2_ap_dev *es2,
382 struct es2_cport_in *cport_in)
387 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
388 urb = cport_in->urb[i];
393 static int es2_arpc_in_enable(struct es2_ap_dev *es2)
399 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
400 urb = es2->arpc_urb[i];
402 ret = usb_submit_urb(urb, GFP_KERNEL);
404 dev_err(&es2->usb_dev->dev,
405 "failed to submit arpc in-urb: %d\n", ret);
413 for (--i; i >= 0; --i) {
414 urb = es2->arpc_urb[i];
421 static void es2_arpc_in_disable(struct es2_ap_dev *es2)
426 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
427 urb = es2->arpc_urb[i];
432 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
434 struct urb *urb = NULL;
438 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
440 /* Look in our pool of allocated urbs first, as that's the "fastest" */
441 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
442 if (es2->cport_out_urb_busy[i] == false &&
443 es2->cport_out_urb_cancelled[i] == false) {
444 es2->cport_out_urb_busy[i] = true;
445 urb = es2->cport_out_urb[i];
449 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
454 * Crap, pool is empty, complain to the syslog and go allocate one
455 * dynamically as we have to succeed.
457 dev_dbg(&es2->usb_dev->dev,
458 "No free CPort OUT urbs, having to dynamically allocate one!\n");
459 return usb_alloc_urb(0, gfp_mask);
462 static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
467 * See if this was an urb in our pool, if so mark it "free", otherwise
468 * we need to free it ourselves.
470 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
471 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
472 if (urb == es2->cport_out_urb[i]) {
473 es2->cport_out_urb_busy[i] = false;
478 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
480 /* If urb is not NULL, then we need to free this urb */
485 * We (ab)use the operation-message header pad bytes to transfer the
486 * cport id in order to minimise overhead.
489 gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
491 header->pad[0] = cport_id;
494 /* Clear the pad bytes used for the CPort id */
495 static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
500 /* Extract the CPort id packed into the header, and clear it */
501 static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
503 u16 cport_id = header->pad[0];
505 gb_message_cport_clear(header);
/*
 * message_send() - queue a Greybus message on a CPort OUT bulk endpoint.
 *
 * NOTE(review): this view is missing lines (local declarations, the error
 * returns, free_urb() on the failure path and the final return), so only
 * the visible statements are annotated; do not infer the full error
 * handling from what is shown here.
 */
511 * Returns zero if the message was successfully queued, or a negative errno
514 static int message_send(struct gb_host_device *hd, u16 cport_id,
515 struct gb_message *message, gfp_t gfp_mask)
517 struct es2_ap_dev *es2 = hd_to_es2(hd);
518 struct usb_device *udev = es2->usb_dev;
526 * The data actually transferred will include an indication
527 * of where the data should be sent. Do one last check of
528 * the target CPort id before filling it in.
530 if (!cport_id_valid(hd, cport_id)) {
531 dev_err(&udev->dev, "invalid cport %u\n", cport_id);
535 /* Find a free urb */
536 urb = next_free_urb(es2, gfp_mask);
/* hcpriv links message -> urb so message_cancel() can find it. */
540 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
541 message->hcpriv = urb;
542 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
544 /* Pack the cport id into the message header */
545 gb_message_cport_pack(message->header, cport_id);
547 buffer_size = sizeof(*message->header) + message->payload_size;
549 ep_pair = cport_to_ep_pair(es2, cport_id);
550 usb_fill_bulk_urb(urb, udev,
551 usb_sndbulkpipe(udev,
552 es2->cport_out[ep_pair].endpoint),
553 message->buffer, buffer_size,
554 cport_out_callback, message);
/* ZLP terminates transfers that are an exact multiple of the packet size. */
555 urb->transfer_flags |= URB_ZERO_PACKET;
557 trace_gb_message_submit(message);
559 retval = usb_submit_urb(urb, gfp_mask);
561 dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
563 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
564 message->hcpriv = NULL;
565 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
568 gb_message_cport_clear(message->header);
/*
 * message_cancel() - cancel an in-flight message's OUT urb.
 *
 * NOTE(review): lines are missing between the visible statements (the
 * usb_get_urb/usb_kill_urb/usb_free_urb sequence in particular); the
 * annotations below cover only what is visible.
 */
577 * Can not be called in atomic context.
579 static void message_cancel(struct gb_message *message)
581 struct gb_host_device *hd = message->operation->connection->hd;
582 struct es2_ap_dev *es2 = hd_to_es2(hd);
588 spin_lock_irq(&es2->cport_out_urb_lock);
589 urb = message->hcpriv;
591 /* Prevent dynamically allocated urb from being deallocated. */
594 /* Prevent pre-allocated urb from being reused. */
595 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
596 if (urb == es2->cport_out_urb[i]) {
597 es2->cport_out_urb_cancelled[i] = true;
601 spin_unlock_irq(&es2->cport_out_urb_lock);
/* Pool urb: clear the cancelled flag so it can be reused. */
605 if (i < NUM_CPORT_OUT_URB) {
606 spin_lock_irq(&es2->cport_out_urb_lock);
607 es2->cport_out_urb_cancelled[i] = false;
608 spin_unlock_irq(&es2->cport_out_urb_lock);
614 static int cport_reset(struct gb_host_device *hd, u16 cport_id)
616 struct es2_ap_dev *es2 = hd_to_es2(hd);
617 struct usb_device *udev = es2->usb_dev;
618 struct arpc_cport_reset_req req;
623 case GB_SVC_CPORT_ID:
624 case ES2_CPORT_CDSI0:
625 case ES2_CPORT_CDSI1:
629 req.cport_id = cpu_to_le16(cport_id);
630 retval = arpc_sync(es2, ARPC_TYPE_CPORT_RESET, &req, sizeof(req),
631 &result, ES2_ARPC_CPORT_TIMEOUT);
632 if (retval == -EREMOTEIO) {
633 dev_err(&udev->dev, "failed to reset cport %u: %d\n", cport_id,
640 static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
643 struct es2_ap_dev *es2 = hd_to_es2(hd);
644 struct ida *id_map = &hd->cport_id_map;
645 int ida_start, ida_end;
648 case ES2_CPORT_CDSI0:
649 case ES2_CPORT_CDSI1:
650 dev_err(&hd->dev, "cport %d not available\n", cport_id);
654 if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
655 flags & GB_CONNECTION_FLAG_CDSI1) {
656 if (es2->cdsi1_in_use) {
657 dev_err(&hd->dev, "CDSI1 already in use\n");
661 es2->cdsi1_in_use = true;
663 return ES2_CPORT_CDSI1;
668 ida_end = hd->num_cports;
669 } else if (cport_id < hd->num_cports) {
670 ida_start = cport_id;
671 ida_end = cport_id + 1;
673 dev_err(&hd->dev, "cport %d not available\n", cport_id);
677 return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
680 static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
682 struct es2_ap_dev *es2 = hd_to_es2(hd);
685 case ES2_CPORT_CDSI1:
686 es2->cdsi1_in_use = false;
690 ida_simple_remove(&hd->cport_id_map, cport_id);
693 static int cport_enable(struct gb_host_device *hd, u16 cport_id,
696 struct es2_ap_dev *es2 = hd_to_es2(hd);
697 struct usb_device *udev = es2->usb_dev;
698 struct gb_apb_request_cport_flags *req;
699 u32 connection_flags;
702 req = kzalloc(sizeof(*req), GFP_KERNEL);
706 connection_flags = 0;
707 if (flags & GB_CONNECTION_FLAG_CONTROL)
708 connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
709 if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
710 connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;
712 req->flags = cpu_to_le32(connection_flags);
714 dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
715 cport_id, connection_flags);
717 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
718 GB_APB_REQUEST_CPORT_FLAGS,
719 USB_DIR_OUT | USB_TYPE_VENDOR |
720 USB_RECIP_INTERFACE, cport_id, 0,
721 req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
722 if (ret != sizeof(*req)) {
723 dev_err(&udev->dev, "failed to set cport flags for port %d\n",
/*
 * cport_disable() - host-device hook run when a cport is torn down.
 * NOTE(review): only the cport_reset() call is visible here; the local
 * declarations and return handling were lost in extraction — presumably
 * it returns the cport_reset() result. TODO confirm.
 */
738 static int cport_disable(struct gb_host_device *hd, u16 cport_id)
742 retval = cport_reset(hd, cport_id);
749 static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
751 struct es2_ap_dev *es2 = hd_to_es2(hd);
752 struct device *dev = &es2->usb_dev->dev;
753 struct arpc_cport_connected_req req;
756 req.cport_id = cpu_to_le16(cport_id);
757 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
758 NULL, ES2_ARPC_CPORT_TIMEOUT);
760 dev_err(dev, "failed to set connected state for cport %u: %d\n",
768 static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
770 struct es2_ap_dev *es2 = hd_to_es2(hd);
771 struct device *dev = &es2->usb_dev->dev;
772 struct arpc_cport_flush_req req;
775 req.cport_id = cpu_to_le16(cport_id);
776 ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
777 NULL, ES2_ARPC_CPORT_TIMEOUT);
779 dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
786 static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
787 u8 phase, unsigned int timeout)
789 struct es2_ap_dev *es2 = hd_to_es2(hd);
790 struct device *dev = &es2->usb_dev->dev;
791 struct arpc_cport_shutdown_req req;
795 if (timeout > U16_MAX)
798 req.cport_id = cpu_to_le16(cport_id);
799 req.timeout = cpu_to_le16(timeout);
801 ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
802 &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
804 dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
805 cport_id, ret, result);
812 static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
813 size_t peer_space, unsigned int timeout)
815 struct es2_ap_dev *es2 = hd_to_es2(hd);
816 struct device *dev = &es2->usb_dev->dev;
817 struct arpc_cport_quiesce_req req;
821 if (peer_space > U16_MAX)
824 if (timeout > U16_MAX)
827 req.cport_id = cpu_to_le16(cport_id);
828 req.peer_space = cpu_to_le16(peer_space);
829 req.timeout = cpu_to_le16(timeout);
830 ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
831 &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
833 dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
834 cport_id, ret, result);
841 static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
843 struct es2_ap_dev *es2 = hd_to_es2(hd);
844 struct device *dev = &es2->usb_dev->dev;
845 struct arpc_cport_clear_req req;
848 req.cport_id = cpu_to_le16(cport_id);
849 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
850 NULL, ES2_ARPC_CPORT_TIMEOUT);
852 dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
859 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
862 struct es2_ap_dev *es2 = hd_to_es2(hd);
863 struct usb_device *udev = es2->usb_dev;
865 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
866 GB_APB_REQUEST_LATENCY_TAG_EN,
867 USB_DIR_OUT | USB_TYPE_VENDOR |
868 USB_RECIP_INTERFACE, cport_id, 0, NULL,
869 0, ES2_USB_CTRL_TIMEOUT);
872 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
877 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
880 struct es2_ap_dev *es2 = hd_to_es2(hd);
881 struct usb_device *udev = es2->usb_dev;
883 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
884 GB_APB_REQUEST_LATENCY_TAG_DIS,
885 USB_DIR_OUT | USB_TYPE_VENDOR |
886 USB_RECIP_INTERFACE, cport_id, 0, NULL,
887 0, ES2_USB_CTRL_TIMEOUT);
890 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
895 static int cport_features_enable(struct gb_host_device *hd, u16 cport_id)
898 struct es2_ap_dev *es2 = hd_to_es2(hd);
899 struct usb_device *udev = es2->usb_dev;
901 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
902 GB_APB_REQUEST_CPORT_FEAT_EN,
903 USB_DIR_OUT | USB_TYPE_VENDOR |
904 USB_RECIP_INTERFACE, cport_id, 0, NULL,
905 0, ES2_USB_CTRL_TIMEOUT);
907 dev_err(&udev->dev, "Cannot enable CPort features for cport %u: %d\n",
912 static int cport_features_disable(struct gb_host_device *hd, u16 cport_id)
915 struct es2_ap_dev *es2 = hd_to_es2(hd);
916 struct usb_device *udev = es2->usb_dev;
918 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
919 GB_APB_REQUEST_CPORT_FEAT_DIS,
920 USB_DIR_OUT | USB_TYPE_VENDOR |
921 USB_RECIP_INTERFACE, cport_id, 0, NULL,
922 0, ES2_USB_CTRL_TIMEOUT);
925 "Cannot disable CPort features for cport %u: %d\n",
930 static int timesync_enable(struct gb_host_device *hd, u8 count,
931 u64 frame_time, u32 strobe_delay, u32 refclk)
934 struct es2_ap_dev *es2 = hd_to_es2(hd);
935 struct usb_device *udev = es2->usb_dev;
936 struct gb_control_timesync_enable_request *request;
938 request = kzalloc(sizeof(*request), GFP_KERNEL);
942 request->count = count;
943 request->frame_time = cpu_to_le64(frame_time);
944 request->strobe_delay = cpu_to_le32(strobe_delay);
945 request->refclk = cpu_to_le32(refclk);
946 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
947 GB_APB_REQUEST_TIMESYNC_ENABLE,
948 USB_DIR_OUT | USB_TYPE_VENDOR |
949 USB_RECIP_INTERFACE, 0, 0, request,
950 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
952 dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
958 static int timesync_disable(struct gb_host_device *hd)
961 struct es2_ap_dev *es2 = hd_to_es2(hd);
962 struct usb_device *udev = es2->usb_dev;
964 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
965 GB_APB_REQUEST_TIMESYNC_DISABLE,
966 USB_DIR_OUT | USB_TYPE_VENDOR |
967 USB_RECIP_INTERFACE, 0, 0, NULL,
968 0, ES2_USB_CTRL_TIMEOUT);
970 dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
975 static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
978 struct es2_ap_dev *es2 = hd_to_es2(hd);
979 struct usb_device *udev = es2->usb_dev;
980 struct timesync_authoritative_request *request;
982 request = kzalloc(sizeof(*request), GFP_KERNEL);
986 for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
987 request->frame_time[i] = cpu_to_le64(frame_time[i]);
989 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
990 GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
991 USB_DIR_OUT | USB_TYPE_VENDOR |
992 USB_RECIP_INTERFACE, 0, 0, request,
993 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
995 dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
1001 static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
1004 struct es2_ap_dev *es2 = hd_to_es2(hd);
1005 struct usb_device *udev = es2->usb_dev;
1006 __le64 *response_frame_time;
1008 response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
1009 if (!response_frame_time)
1012 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1013 GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
1014 USB_DIR_IN | USB_TYPE_VENDOR |
1015 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
1016 sizeof(*response_frame_time),
1017 ES2_USB_CTRL_TIMEOUT);
1019 if (retval != sizeof(*response_frame_time)) {
1020 dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
1028 *frame_time = le64_to_cpu(*response_frame_time);
1031 kfree(response_frame_time);
1035 static struct gb_hd_driver es2_driver = {
1036 .hd_priv_size = sizeof(struct es2_ap_dev),
1037 .message_send = message_send,
1038 .message_cancel = message_cancel,
1039 .cport_allocate = es2_cport_allocate,
1040 .cport_release = es2_cport_release,
1041 .cport_enable = cport_enable,
1042 .cport_disable = cport_disable,
1043 .cport_connected = es2_cport_connected,
1044 .cport_flush = es2_cport_flush,
1045 .cport_shutdown = es2_cport_shutdown,
1046 .cport_quiesce = es2_cport_quiesce,
1047 .cport_clear = es2_cport_clear,
1048 .latency_tag_enable = latency_tag_enable,
1049 .latency_tag_disable = latency_tag_disable,
1051 .cport_features_enable = cport_features_enable,
1052 .cport_features_disable = cport_features_disable,
1053 .timesync_enable = timesync_enable,
1054 .timesync_disable = timesync_disable,
1055 .timesync_authoritative = timesync_authoritative,
1056 .timesync_get_last_event = timesync_get_last_event,
/*
 * NOTE(review): the switch statement and most of its case labels are
 * missing from this view; only fragments of the error branches remain.
 * Annotated as-is rather than reconstructed.
 */
1059 /* Common function to report consistent warnings based on URB status */
1060 static int check_urb_status(struct urb *urb)
1062 struct device *dev = &urb->dev->dev;
1063 int status = urb->status;
1070 dev_err(dev, "%s: overflow actual length is %d\n",
1071 __func__, urb->actual_length);
1077 /* device is gone, stop sending */
1080 dev_err(dev, "%s: unknown status %d\n", __func__, status);
/*
 * es2_destroy() - tear down all urbs, buffers and reserved cports, then
 * drop the usb_dev reference.
 *
 * NOTE(review): the usb_free_urb()/kfree() calls inside the loops and the
 * final gb_hd_put()/usb_put_dev() are missing from this view; only the
 * visible statements are annotated.
 */
1085 static void es2_destroy(struct es2_ap_dev *es2)
1087 struct usb_device *udev;
1091 debugfs_remove(es2->apb_log_enable_dentry);
1092 usb_log_disable(es2);
1094 /* Tear down everything! */
1095 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1096 struct urb *urb = es2->cport_out_urb[i];
1102 es2->cport_out_urb[i] = NULL;
1103 es2->cport_out_urb_busy[i] = false; /* just to be anal */
1106 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
1107 struct urb *urb = es2->arpc_urb[i];
1112 kfree(es2->arpc_buffer[i]);
1113 es2->arpc_buffer[i] = NULL;
1116 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1117 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
1119 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1120 struct urb *urb = cport_in->urb[i];
1125 kfree(cport_in->buffer[i]);
1126 cport_in->buffer[i] = NULL;
1130 kfree(es2->cport_to_ep);
1132 /* release reserved CDSI0 and CDSI1 cports */
1133 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
1134 gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);
1136 udev = es2->usb_dev;
/*
 * cport_in_callback() - completion handler for CPort IN bulk urbs: hand
 * the received message to greybus core and resubmit the urb.
 *
 * NOTE(review): several lines (declarations, the early-return/resubmit
 * control flow between the status checks) are missing from this view.
 */
1142 static void cport_in_callback(struct urb *urb)
1144 struct gb_host_device *hd = urb->context;
1145 struct device *dev = &urb->dev->dev;
1146 struct gb_operation_msg_hdr *header;
1147 int status = check_urb_status(urb);
/* Transient bus errors: drop this buffer but keep the urb in rotation. */
1152 if ((status == -EAGAIN) || (status == -EPROTO))
1155 /* The urb is being unlinked */
1156 if (status == -ENOENT || status == -ESHUTDOWN)
1159 dev_err(dev, "urb cport in error %d (dropped)\n", status);
1163 if (urb->actual_length < sizeof(*header)) {
1164 dev_err(dev, "short message received\n");
1168 /* Extract the CPort id, which is packed in the message header */
1169 header = urb->transfer_buffer;
1170 cport_id = gb_message_cport_unpack(header);
1172 if (cport_id_valid(hd, cport_id)) {
1173 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
1174 urb->actual_length);
1176 dev_err(dev, "invalid cport id %u received\n", cport_id);
1179 /* put our urb back in the request pool */
1180 retval = usb_submit_urb(urb, GFP_ATOMIC);
1182 dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
1185 static void cport_out_callback(struct urb *urb)
1187 struct gb_message *message = urb->context;
1188 struct gb_host_device *hd = message->operation->connection->hd;
1189 struct es2_ap_dev *es2 = hd_to_es2(hd);
1190 int status = check_urb_status(urb);
1191 unsigned long flags;
1193 gb_message_cport_clear(message->header);
1195 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
1196 message->hcpriv = NULL;
1197 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
1200 * Tell the submitter that the message send (attempt) is
1201 * complete, and report the status.
1203 greybus_message_sent(hd, message, status);
1208 static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
1212 if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
1215 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
1219 INIT_LIST_HEAD(&rpc->list);
1220 rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
1224 rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
1228 rpc->req->type = type;
1229 rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
1230 memcpy(rpc->req->data, payload, size);
1232 init_completion(&rpc->response_received);
1244 static void arpc_free(struct arpc *rpc)
1251 static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
1255 list_for_each_entry(rpc, &es2->arpcs, list) {
1256 if (rpc->req->id == id)
1263 static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
1266 rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
1267 list_add_tail(&rpc->list, &es2->arpcs);
1270 static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
1273 rpc->active = false;
1274 list_del(&rpc->list);
1278 static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
1280 struct usb_device *udev = es2->usb_dev;
1283 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1284 GB_APB_REQUEST_ARPC_RUN,
1285 USB_DIR_OUT | USB_TYPE_VENDOR |
1286 USB_RECIP_INTERFACE,
1288 rpc->req, le16_to_cpu(rpc->req->size),
1289 ES2_USB_CTRL_TIMEOUT);
1290 if (retval != le16_to_cpu(rpc->req->size)) {
1292 "failed to send ARPC request %d: %d\n",
1293 rpc->req->type, retval);
/*
 * arpc_sync() - run one ARPC transaction and wait for its response.
 *
 * NOTE(review): this view is missing the arpc_add()/arpc_del() calls
 * inside the locked sections, the error-path gotos and arpc_free(); the
 * annotations below cover only the visible statements.
 */
1302 static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
1303 size_t size, int *result, unsigned int timeout)
1306 unsigned long flags;
1312 rpc = arpc_alloc(payload, size, type);
1316 spin_lock_irqsave(&es2->arpc_lock, flags);
1318 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1320 retval = arpc_send(es2, rpc, timeout);
/* Wait for arpc_in_callback() to complete us, bounded by the timeout. */
1324 retval = wait_for_completion_interruptible_timeout(
1325 &rpc->response_received,
1326 msecs_to_jiffies(timeout));
1329 retval = -ETIMEDOUT;
/* Non-zero bridge result is mapped to -EREMOTEIO, result code passed out. */
1333 if (rpc->resp->result) {
1334 retval = -EREMOTEIO;
1336 *result = rpc->resp->result;
1342 spin_lock_irqsave(&es2->arpc_lock, flags);
1344 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1347 if (retval < 0 && retval != -EREMOTEIO) {
1348 dev_err(&es2->usb_dev->dev,
1349 "failed to execute ARPC: %d\n", retval);
/*
 * arpc_in_callback() - completion handler for ARPC IN urbs: match the
 * response to its waiting transaction and wake the waiter, then resubmit.
 *
 * NOTE(review): the early-return/resubmit control flow between the status
 * checks is missing from this view; annotations cover visible lines only.
 */
1355 static void arpc_in_callback(struct urb *urb)
1357 struct es2_ap_dev *es2 = urb->context;
1358 struct device *dev = &urb->dev->dev;
1359 int status = check_urb_status(urb);
1361 struct arpc_response_message *resp;
1362 unsigned long flags;
1366 if ((status == -EAGAIN) || (status == -EPROTO))
1369 /* The urb is being unlinked */
1370 if (status == -ENOENT || status == -ESHUTDOWN)
1373 dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
1377 if (urb->actual_length < sizeof(*resp)) {
1378 dev_err(dev, "short aprc response received\n");
1382 resp = urb->transfer_buffer;
1383 spin_lock_irqsave(&es2->arpc_lock, flags);
1384 rpc = arpc_find(es2, resp->id);
1386 dev_err(dev, "invalid arpc response id received: %u\n",
1387 le16_to_cpu(resp->id));
1388 spin_unlock_irqrestore(&es2->arpc_lock, flags);
/* Copy under the lock so arpc_sync() never sees a half-written resp. */
1393 memcpy(rpc->resp, resp, sizeof(*resp));
1394 complete(&rpc->response_received);
1395 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1398 /* put our urb back in the request pool */
1399 retval = usb_submit_urb(urb, GFP_ATOMIC);
1401 dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
1404 #define APB1_LOG_MSG_SIZE 64
1405 static void apb_log_get(struct es2_ap_dev *es2, char *buf)
1410 retval = usb_control_msg(es2->usb_dev,
1411 usb_rcvctrlpipe(es2->usb_dev, 0),
1413 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1417 ES2_USB_CTRL_TIMEOUT);
1419 kfifo_in(&es2->apb_log_fifo, buf, retval);
1420 } while (retval > 0);
1423 static int apb_log_poll(void *data)
1425 struct es2_ap_dev *es2 = data;
1428 buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
1432 while (!kthread_should_stop()) {
1434 apb_log_get(es2, buf);
1442 static ssize_t apb_log_read(struct file *f, char __user *buf,
1443 size_t count, loff_t *ppos)
1445 struct es2_ap_dev *es2 = f->f_inode->i_private;
1450 if (count > APB1_LOG_SIZE)
1451 count = APB1_LOG_SIZE;
1453 tmp_buf = kmalloc(count, GFP_KERNEL);
1457 copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1458 ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1465 static const struct file_operations apb_log_fops = {
1466 .read = apb_log_read,
1469 static void usb_log_enable(struct es2_ap_dev *es2)
1471 if (!IS_ERR_OR_NULL(es2->apb_log_task))
1474 /* get log from APB1 */
1475 es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
1476 if (IS_ERR(es2->apb_log_task))
1478 /* XXX We will need to rename this per APB */
1479 es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
1480 gb_debugfs_get(), es2,
1484 static void usb_log_disable(struct es2_ap_dev *es2)
1486 if (IS_ERR_OR_NULL(es2->apb_log_task))
1489 debugfs_remove(es2->apb_log_dentry);
1490 es2->apb_log_dentry = NULL;
1492 kthread_stop(es2->apb_log_task);
1493 es2->apb_log_task = NULL;
1496 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1497 size_t count, loff_t *ppos)
1499 struct es2_ap_dev *es2 = f->f_inode->i_private;
1500 int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1503 sprintf(tmp_buf, "%d\n", enable);
1504 return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
1507 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1508 size_t count, loff_t *ppos)
1512 struct es2_ap_dev *es2 = f->f_inode->i_private;
1514 retval = kstrtoint_from_user(buf, count, 10, &enable);
1519 usb_log_enable(es2);
1521 usb_log_disable(es2);
1526 static const struct file_operations apb_log_enable_fops = {
1527 .read = apb_log_enable_read,
1528 .write = apb_log_enable_write,
1531 static int apb_get_cport_count(struct usb_device *udev)
1534 __le16 *cport_count;
1536 cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1540 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1541 GB_APB_REQUEST_CPORT_COUNT,
1542 USB_DIR_IN | USB_TYPE_VENDOR |
1543 USB_RECIP_INTERFACE, 0, 0, cport_count,
1544 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
1545 if (retval != sizeof(*cport_count)) {
1546 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1555 retval = le16_to_cpu(*cport_count);
1557 /* We need to fit a CPort ID in one byte of a message header */
1558 if (retval > U8_MAX) {
1560 dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
1569 * The ES2 USB Bridge device has 15 endpoints
1570 * 1 Control - usual USB stuff + AP -> APBridgeA messages
1571 * 7 Bulk IN - CPort data in
1572 * 7 Bulk OUT - CPort data out
/*
 * ap_probe() - USB probe routine for the ES2 APBridge interface.
 *
 * Queries the bridge for its CPort count, creates the Greybus host
 * device, walks the interface descriptor to find the bulk IN/OUT
 * endpoints, pre-allocates every CPort IN, ARPC IN and CPort OUT urb
 * (and their buffers), creates the "apb_log_enable" debugfs knob, and
 * finally registers the host device and enables receive traffic.
 *
 * NOTE(review): this capture of the function is elided; several local
 * declarations and error-path lines are not visible here.
 */
1574 static int ap_probe(struct usb_interface *interface,
1575 const struct usb_device_id *id)
1577 struct es2_ap_dev *es2;
1578 struct gb_host_device *hd;
1579 struct usb_device *udev;
1580 struct usb_host_interface *iface_desc;
1581 struct usb_endpoint_descriptor *endpoint;
/* take a reference on the USB device for the lifetime of the es2 device */
1588 udev = usb_get_dev(interface_to_usbdev(interface));
/* size the host device by the CPort count reported by the bridge */
1590 num_cports = apb_get_cport_count(udev);
1591 if (num_cports < 0) {
1593 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1598 hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1605 es2 = hd_to_es2(hd);
1607 es2->usb_intf = interface;
1608 es2->usb_dev = udev;
1609 spin_lock_init(&es2->cport_out_urb_lock);
1610 INIT_KFIFO(es2->apb_log_fifo);
1611 usb_set_intfdata(interface, es2);
1614 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
1617 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1620 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
/* per-CPort endpoint-mapping table, sized by the discovered CPort count */
1624 es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
1626 if (!es2->cport_to_ep) {
1631 /* find all bulk endpoints */
1632 iface_desc = interface->cur_altsetting;
1633 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1634 endpoint = &iface_desc->endpoint[i].desc;
/*
 * The first NUM_BULKS bulk-in endpoints carry CPort data; the single
 * extra bulk-in endpoint is used for ARPC responses.
 */
1636 if (usb_endpoint_is_bulk_in(endpoint)) {
1637 if (bulk_in < NUM_BULKS)
1638 es2->cport_in[bulk_in].endpoint =
1639 endpoint->bEndpointAddress;
1641 es2->arpc_endpoint_in =
1642 endpoint->bEndpointAddress;
1644 } else if (usb_endpoint_is_bulk_out(endpoint)) {
1645 es2->cport_out[bulk_out++].endpoint =
1646 endpoint->bEndpointAddress;
1649 "Unknown endpoint type found, address 0x%02x\n",
1650 endpoint->bEndpointAddress);
/* insist on exactly the expected endpoint layout before going further */
1653 if (bulk_in != NUM_BULKS_IN || bulk_out != NUM_BULKS_OUT) {
1654 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1659 /* Allocate buffers for our cport in messages */
1660 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1661 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
1663 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1667 urb = usb_alloc_urb(0, GFP_KERNEL);
1672 cport_in->urb[i] = urb;
1674 buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1680 usb_fill_bulk_urb(urb, udev,
1681 usb_rcvbulkpipe(udev,
1682 cport_in->endpoint),
1683 buffer, ES2_GBUF_MSG_SIZE_MAX,
1684 cport_in_callback, hd);
1686 cport_in->buffer[i] = buffer;
1690 /* Allocate buffers for ARPC in messages */
1691 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
1695 urb = usb_alloc_urb(0, GFP_KERNEL);
1700 es2->arpc_urb[i] = urb;
1702 buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
1708 usb_fill_bulk_urb(urb, udev,
1709 usb_rcvbulkpipe(udev,
1710 es2->arpc_endpoint_in),
1711 buffer, ARPC_IN_SIZE_MAX,
1712 arpc_in_callback, es2);
1714 es2->arpc_buffer[i] = buffer;
1717 /* Allocate urbs for our CPort OUT messages */
1718 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1721 urb = usb_alloc_urb(0, GFP_KERNEL);
1727 es2->cport_out_urb[i] = urb;
1728 es2->cport_out_urb_busy[i] = false; /* just to be anal */
1731 /* XXX We will need to rename this per APB */
1732 es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1733 (S_IWUSR | S_IRUGO),
1734 gb_debugfs_get(), es2,
1735 &apb_log_enable_fops);
1737 INIT_LIST_HEAD(&es2->arpcs);
1738 spin_lock_init(&es2->arpc_lock);
/* ARPC reception is enabled before the host device is registered */
1740 if (es2_arpc_in_enable(es2))
1743 retval = gb_hd_add(hd);
1745 goto err_disable_arpc_in;
/* start CPort receive traffic on every bulk-in endpoint */
1747 for (i = 0; i < NUM_BULKS; ++i) {
1748 retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
1750 goto err_disable_cport_in;
/* unwind: disable only the cport-in endpoints enabled so far, then ARPC */
1755 err_disable_cport_in:
1756 for (--i; i >= 0; --i)
1757 es2_cport_in_disable(es2, &es2->cport_in[i]);
1759 err_disable_arpc_in:
1760 es2_arpc_in_disable(es2);
1767 static void ap_disconnect(struct usb_interface *interface)
1769 struct es2_ap_dev *es2 = usb_get_intfdata(interface);
1774 for (i = 0; i < NUM_BULKS; ++i)
1775 es2_cport_in_disable(es2, &es2->cport_in[i]);
1776 es2_arpc_in_disable(es2);
1781 static struct usb_driver es2_ap_driver = {
1782 .name = "es2_ap_driver",
1784 .disconnect = ap_disconnect,
1785 .id_table = id_table,
1789 module_usb_driver(es2_ap_driver);
1791 MODULE_LICENSE("GPL v2");
1792 MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");