/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9 #include <linux/kthread.h>
10 #include <linux/sizes.h>
11 #include <linux/usb.h>
12 #include <linux/kfifo.h>
13 #include <linux/debugfs.h>
14 #include <asm/unaligned.h>
17 #include "kernel_ver.h"
18 #include "connection.h"
19 #include "greybus_trace.h"
/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048
24 static const struct usb_device_id id_table[] = {
25 { USB_DEVICE(0xffff, 0x0002) }, /* Made up number, delete once firmware is fixed to use real number */
26 { USB_DEVICE(0x18d1, 0x1eaf) },
29 MODULE_DEVICE_TABLE(usb, id_table);
#define APB1_LOG_SIZE		SZ_16K

/* Number of bulk in and bulk out couple */
/* NOTE(review): value reconstructed from the "7 Bulk IN / 7 Bulk OUT"
 * endpoint description below — confirm against hardware docs. */
#define NUM_BULKS		7

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	(8 * NUM_BULKS)

/* vendor request APB1 log */
#define REQUEST_LOG		0x02

/* vendor request to map a cport to bulk in and bulk out endpoints */
#define REQUEST_EP_MAPPING	0x03

/* vendor request to get the number of cports available */
#define REQUEST_CPORT_COUNT	0x04

/* vendor request to reset a cport state */
#define REQUEST_RESET_CPORT	0x05

/* vendor request to time the latency of messages on a given cport */
#define REQUEST_LATENCY_TAG_EN	0x06
#define REQUEST_LATENCY_TAG_DIS	0x07
65 * @endpoint: bulk in endpoint for CPort data
66 * @urb: array of urbs for the CPort in messages
67 * @buffer: array of buffers for the @cport_in_urb urbs
71 struct urb *urb[NUM_CPORT_IN_URB];
72 u8 *buffer[NUM_CPORT_IN_URB];
76 * @endpoint: bulk out endpoint for CPort data
78 struct es2_cport_out {
83 * es2_ap_dev - ES2 USB Bridge to AP structure
84 * @usb_dev: pointer to the USB device we are.
85 * @usb_intf: pointer to the USB interface we are bound to.
86 * @hd: pointer to our gb_host_device structure
88 * @cport_in: endpoint, urbs and buffer for cport in messages
89 * @cport_out: endpoint for for cport out messages
90 * @cport_out_urb: array of urbs for the CPort out messages
91 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
93 * @cport_out_urb_cancelled: array of flags indicating whether the
94 * corresponding @cport_out_urb is being cancelled
95 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
97 * @apb_log_task: task pointer for logging thread
98 * @apb_log_dentry: file system entry for the log file interface
99 * @apb_log_enable_dentry: file system entry for enabling logging
100 * @apb_log_fifo: kernel FIFO to carry logged data
103 struct usb_device *usb_dev;
104 struct usb_interface *usb_intf;
105 struct gb_host_device *hd;
107 struct es2_cport_in cport_in[NUM_BULKS];
108 struct es2_cport_out cport_out[NUM_BULKS];
109 struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
110 bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
111 bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
112 spinlock_t cport_out_urb_lock;
116 struct task_struct *apb_log_task;
117 struct dentry *apb_log_dentry;
118 struct dentry *apb_log_enable_dentry;
119 DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
123 * cport_to_ep - information about cport to endpoints mapping
124 * @cport_id: the id of cport to map to endpoints
125 * @endpoint_in: the endpoint number to use for in transfer
126 * @endpoint_out: he endpoint number to use for out transfer
134 static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
136 return (struct es2_ap_dev *)&hd->hd_priv;
/* Forward declarations for routines defined later in this file */
static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
143 /* Get the endpoints pair mapped to the cport */
144 static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
146 if (cport_id >= es2->hd->num_cports)
148 return es2->cport_to_ep[cport_id];
151 #define ES2_TIMEOUT 500 /* 500 ms for the SVC to do something */
153 /* Disable for now until we work all of this out to keep a warning-free build */
155 /* Test if the endpoints pair is already mapped to a cport */
156 static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
160 for (i = 0; i < es2->hd->num_cports; i++) {
161 if (es2->cport_to_ep[i] == ep_pair)
167 /* Configure the endpoint mapping and send the request to APBridge */
168 static int map_cport_to_ep(struct es2_ap_dev *es2,
169 u16 cport_id, int ep_pair)
172 struct cport_to_ep *cport_to_ep;
174 if (ep_pair < 0 || ep_pair >= NUM_BULKS)
176 if (cport_id >= es2->hd->num_cports)
178 if (ep_pair && ep_pair_in_use(es2, ep_pair))
181 cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
185 es2->cport_to_ep[cport_id] = ep_pair;
186 cport_to_ep->cport_id = cpu_to_le16(cport_id);
187 cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
188 cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
190 retval = usb_control_msg(es2->usb_dev,
191 usb_sndctrlpipe(es2->usb_dev, 0),
193 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
196 sizeof(*cport_to_ep),
198 if (retval == sizeof(*cport_to_ep))
205 /* Unmap a cport: use the muxed endpoints pair */
206 static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
208 return map_cport_to_ep(es2, cport_id, 0);
212 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
214 struct urb *urb = NULL;
218 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
220 /* Look in our pool of allocated urbs first, as that's the "fastest" */
221 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
222 if (es2->cport_out_urb_busy[i] == false &&
223 es2->cport_out_urb_cancelled[i] == false) {
224 es2->cport_out_urb_busy[i] = true;
225 urb = es2->cport_out_urb[i];
229 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
234 * Crap, pool is empty, complain to the syslog and go allocate one
235 * dynamically as we have to succeed.
237 dev_err(&es2->usb_dev->dev,
238 "No free CPort OUT urbs, having to dynamically allocate one!\n");
239 return usb_alloc_urb(0, gfp_mask);
242 static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
247 * See if this was an urb in our pool, if so mark it "free", otherwise
248 * we need to free it ourselves.
250 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
251 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
252 if (urb == es2->cport_out_urb[i]) {
253 es2->cport_out_urb_busy[i] = false;
258 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
260 /* If urb is not NULL, then we need to free this urb */
265 * We (ab)use the operation-message header pad bytes to transfer the
266 * cport id in order to minimise overhead.
269 gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
271 header->pad[0] = cport_id;
274 /* Clear the pad bytes used for the CPort id */
275 static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
280 /* Extract the CPort id packed into the header, and clear it */
281 static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
283 u16 cport_id = header->pad[0];
285 gb_message_cport_clear(header);
291 * Returns zero if the message was successfully queued, or a negative errno
294 static int message_send(struct gb_host_device *hd, u16 cport_id,
295 struct gb_message *message, gfp_t gfp_mask)
297 struct es2_ap_dev *es2 = hd_to_es2(hd);
298 struct usb_device *udev = es2->usb_dev;
306 * The data actually transferred will include an indication
307 * of where the data should be sent. Do one last check of
308 * the target CPort id before filling it in.
310 if (!cport_id_valid(hd, cport_id)) {
311 dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
316 /* Find a free urb */
317 urb = next_free_urb(es2, gfp_mask);
321 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
322 message->hcpriv = urb;
323 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
325 /* Pack the cport id into the message header */
326 gb_message_cport_pack(message->header, cport_id);
328 buffer_size = sizeof(*message->header) + message->payload_size;
330 ep_pair = cport_to_ep_pair(es2, cport_id);
331 usb_fill_bulk_urb(urb, udev,
332 usb_sndbulkpipe(udev,
333 es2->cport_out[ep_pair].endpoint),
334 message->buffer, buffer_size,
335 cport_out_callback, message);
336 urb->transfer_flags |= URB_ZERO_PACKET;
337 trace_gb_host_device_send(hd, cport_id, buffer_size);
338 retval = usb_submit_urb(urb, gfp_mask);
340 dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);
342 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
343 message->hcpriv = NULL;
344 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
347 gb_message_cport_clear(message->header);
356 * Can not be called in atomic context.
358 static void message_cancel(struct gb_message *message)
360 struct gb_host_device *hd = message->operation->connection->hd;
361 struct es2_ap_dev *es2 = hd_to_es2(hd);
367 spin_lock_irq(&es2->cport_out_urb_lock);
368 urb = message->hcpriv;
370 /* Prevent dynamically allocated urb from being deallocated. */
373 /* Prevent pre-allocated urb from being reused. */
374 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
375 if (urb == es2->cport_out_urb[i]) {
376 es2->cport_out_urb_cancelled[i] = true;
380 spin_unlock_irq(&es2->cport_out_urb_lock);
384 if (i < NUM_CPORT_OUT_URB) {
385 spin_lock_irq(&es2->cport_out_urb_lock);
386 es2->cport_out_urb_cancelled[i] = false;
387 spin_unlock_irq(&es2->cport_out_urb_lock);
393 static int cport_reset(struct gb_host_device *hd, u16 cport_id)
395 struct es2_ap_dev *es2 = hd_to_es2(hd);
396 struct usb_device *udev = es2->usb_dev;
399 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
401 USB_DIR_OUT | USB_TYPE_VENDOR |
402 USB_RECIP_INTERFACE, 0, cport_id,
403 NULL, 0, ES2_TIMEOUT);
405 dev_err(&udev->dev, "failed to reset cport %hu: %d\n", cport_id,
413 static int cport_enable(struct gb_host_device *hd, u16 cport_id)
417 if (cport_id != GB_SVC_CPORT_ID) {
418 retval = cport_reset(hd, cport_id);
426 static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
429 struct es2_ap_dev *es2 = hd_to_es2(hd);
430 struct usb_device *udev = es2->usb_dev;
432 if (!cport_id_valid(hd, cport_id)) {
433 dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
438 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
439 REQUEST_LATENCY_TAG_EN,
440 USB_DIR_OUT | USB_TYPE_VENDOR |
441 USB_RECIP_INTERFACE, cport_id, 0, NULL,
445 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
450 static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
453 struct es2_ap_dev *es2 = hd_to_es2(hd);
454 struct usb_device *udev = es2->usb_dev;
456 if (!cport_id_valid(hd, cport_id)) {
457 dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
462 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
463 REQUEST_LATENCY_TAG_DIS,
464 USB_DIR_OUT | USB_TYPE_VENDOR |
465 USB_RECIP_INTERFACE, cport_id, 0, NULL,
469 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
474 static struct greybus_host_driver es2_driver = {
475 .hd_priv_size = sizeof(struct es2_ap_dev),
476 .message_send = message_send,
477 .message_cancel = message_cancel,
478 .cport_enable = cport_enable,
479 .latency_tag_enable = latency_tag_enable,
480 .latency_tag_disable = latency_tag_disable,
483 /* Common function to report consistent warnings based on URB status */
484 static int check_urb_status(struct urb *urb)
486 struct device *dev = &urb->dev->dev;
487 int status = urb->status;
494 dev_err(dev, "%s: overflow actual length is %d\n",
495 __func__, urb->actual_length);
501 /* device is gone, stop sending */
504 dev_err(dev, "%s: unknown status %d\n", __func__, status);
509 static void ap_disconnect(struct usb_interface *interface)
511 struct es2_ap_dev *es2;
512 struct usb_device *udev;
516 es2 = usb_get_intfdata(interface);
520 usb_log_disable(es2);
522 /* Tear down everything! */
523 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
524 struct urb *urb = es2->cport_out_urb[i];
530 es2->cport_out_urb[i] = NULL;
531 es2->cport_out_urb_busy[i] = false; /* just to be anal */
534 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
535 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
537 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
538 struct urb *urb = cport_in->urb[i];
544 kfree(cport_in->buffer[i]);
545 cport_in->buffer[i] = NULL;
549 usb_set_intfdata(interface, NULL);
551 greybus_remove_hd(es2->hd);
552 kfree(es2->cport_to_ep);
557 static void cport_in_callback(struct urb *urb)
559 struct gb_host_device *hd = urb->context;
560 struct device *dev = &urb->dev->dev;
561 struct gb_operation_msg_hdr *header;
562 int status = check_urb_status(urb);
567 if ((status == -EAGAIN) || (status == -EPROTO))
569 dev_err(dev, "urb cport in error %d (dropped)\n", status);
573 if (urb->actual_length < sizeof(*header)) {
574 dev_err(dev, "short message received\n");
578 /* Extract the CPort id, which is packed in the message header */
579 header = urb->transfer_buffer;
580 cport_id = gb_message_cport_unpack(header);
582 if (cport_id_valid(hd, cport_id)) {
583 trace_gb_host_device_recv(hd, cport_id, urb->actual_length);
584 greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
587 dev_err(dev, "invalid cport id 0x%02x received\n", cport_id);
590 /* put our urb back in the request pool */
591 retval = usb_submit_urb(urb, GFP_ATOMIC);
593 dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
596 static void cport_out_callback(struct urb *urb)
598 struct gb_message *message = urb->context;
599 struct gb_host_device *hd = message->operation->connection->hd;
600 struct es2_ap_dev *es2 = hd_to_es2(hd);
601 int status = check_urb_status(urb);
604 gb_message_cport_clear(message->header);
606 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
607 message->hcpriv = NULL;
608 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
611 * Tell the submitter that the message send (attempt) is
612 * complete, and report the status.
614 greybus_message_sent(hd, message, status);
619 #define APB1_LOG_MSG_SIZE 64
620 static void apb_log_get(struct es2_ap_dev *es2, char *buf)
624 /* SVC messages go down our control pipe */
626 retval = usb_control_msg(es2->usb_dev,
627 usb_rcvctrlpipe(es2->usb_dev, 0),
629 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
635 kfifo_in(&es2->apb_log_fifo, buf, retval);
636 } while (retval > 0);
639 static int apb_log_poll(void *data)
641 struct es2_ap_dev *es2 = data;
644 buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
648 while (!kthread_should_stop()) {
650 apb_log_get(es2, buf);
658 static ssize_t apb_log_read(struct file *f, char __user *buf,
659 size_t count, loff_t *ppos)
661 struct es2_ap_dev *es2 = f->f_inode->i_private;
666 if (count > APB1_LOG_SIZE)
667 count = APB1_LOG_SIZE;
669 tmp_buf = kmalloc(count, GFP_KERNEL);
673 copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
674 ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
681 static const struct file_operations apb_log_fops = {
682 .read = apb_log_read,
685 static void usb_log_enable(struct es2_ap_dev *es2)
687 if (!IS_ERR_OR_NULL(es2->apb_log_task))
690 /* get log from APB1 */
691 es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
692 if (IS_ERR(es2->apb_log_task))
694 /* XXX We will need to rename this per APB */
695 es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
696 gb_debugfs_get(), NULL,
700 static void usb_log_disable(struct es2_ap_dev *es2)
702 if (IS_ERR_OR_NULL(es2->apb_log_task))
705 debugfs_remove(es2->apb_log_dentry);
706 es2->apb_log_dentry = NULL;
708 kthread_stop(es2->apb_log_task);
709 es2->apb_log_task = NULL;
712 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
713 size_t count, loff_t *ppos)
715 struct es2_ap_dev *es2 = f->f_inode->i_private;
716 int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
719 sprintf(tmp_buf, "%d\n", enable);
720 return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
723 static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
724 size_t count, loff_t *ppos)
728 struct es2_ap_dev *es2 = f->f_inode->i_private;
730 retval = kstrtoint_from_user(buf, count, 10, &enable);
737 usb_log_disable(es2);
742 static const struct file_operations apb_log_enable_fops = {
743 .read = apb_log_enable_read,
744 .write = apb_log_enable_write,
747 static int apb_get_cport_count(struct usb_device *udev)
752 cport_count = kmalloc(sizeof(*cport_count), GFP_KERNEL);
756 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
758 USB_DIR_IN | USB_TYPE_VENDOR |
759 USB_RECIP_INTERFACE, 0, 0, cport_count,
760 sizeof(*cport_count), ES2_TIMEOUT);
762 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
767 retval = le16_to_cpu(*cport_count);
769 /* We need to fit a CPort ID in one byte of a message header */
770 if (retval > U8_MAX) {
772 dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
781 * The ES2 USB Bridge device has 15 endpoints
782 * 1 Control - usual USB stuff + AP -> APBridgeA messages
783 * 7 Bulk IN - CPort data in
784 * 7 Bulk OUT - CPort data out
786 static int ap_probe(struct usb_interface *interface,
787 const struct usb_device_id *id)
789 struct es2_ap_dev *es2;
790 struct gb_host_device *hd;
791 struct usb_device *udev;
792 struct usb_host_interface *iface_desc;
793 struct usb_endpoint_descriptor *endpoint;
796 int retval = -ENOMEM;
800 udev = usb_get_dev(interface_to_usbdev(interface));
802 num_cports = apb_get_cport_count(udev);
803 if (num_cports < 0) {
805 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
810 hd = greybus_create_hd(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
819 es2->usb_intf = interface;
821 spin_lock_init(&es2->cport_out_urb_lock);
822 INIT_KFIFO(es2->apb_log_fifo);
823 usb_set_intfdata(interface, es2);
825 es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
827 if (!es2->cport_to_ep) {
832 /* find all bulk endpoints */
833 iface_desc = interface->cur_altsetting;
834 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
835 endpoint = &iface_desc->endpoint[i].desc;
837 if (usb_endpoint_is_bulk_in(endpoint)) {
838 es2->cport_in[bulk_in++].endpoint =
839 endpoint->bEndpointAddress;
840 } else if (usb_endpoint_is_bulk_out(endpoint)) {
841 es2->cport_out[bulk_out++].endpoint =
842 endpoint->bEndpointAddress;
845 "Unknown endpoint type found, address %x\n",
846 endpoint->bEndpointAddress);
849 if ((bulk_in == 0) ||
851 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
855 /* Allocate buffers for our cport in messages and start them up */
856 for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
857 struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];
859 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
863 urb = usb_alloc_urb(0, GFP_KERNEL);
866 buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
870 usb_fill_bulk_urb(urb, udev,
871 usb_rcvbulkpipe(udev,
873 buffer, ES2_GBUF_MSG_SIZE_MAX,
874 cport_in_callback, hd);
875 cport_in->urb[i] = urb;
876 cport_in->buffer[i] = buffer;
877 retval = usb_submit_urb(urb, GFP_KERNEL);
883 /* Allocate urbs for our CPort OUT messages */
884 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
887 urb = usb_alloc_urb(0, GFP_KERNEL);
891 es2->cport_out_urb[i] = urb;
892 es2->cport_out_urb_busy[i] = false; /* just to be anal */
895 /* XXX We will need to rename this per APB */
896 es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
898 gb_debugfs_get(), es2,
899 &apb_log_enable_fops);
902 ap_disconnect(interface);
907 static struct usb_driver es2_ap_driver = {
908 .name = "es2_ap_driver",
910 .disconnect = ap_disconnect,
911 .id_table = id_table,
914 module_usb_driver(es2_ap_driver);
916 MODULE_LICENSE("GPL v2");
917 MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");