#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */

struct macvtap_queue {
        struct macvlan_dev __rcu *vlan;
        struct list_head next;

static struct proto macvtap_proto = {
        .obj_size = sizeof(struct macvtap_queue),

/*
 * Variables for dealing with macvtap device numbers.
 */

static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

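/*
 * Default number of payload bytes copied into the skb's linear area on the
 * zero-copy transmit path; the rest of the payload is mapped directly from
 * user pages (see macvtap_get_user() below).
 */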
#define GOODCOPY_LEN 128

static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

/*
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

static int macvtap_enable_queue(struct net_device *dev, struct file *file,
                                struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        q->queue_index = vlan->numvtaps;

static int macvtap_set_queue(struct net_device *dev, struct file *file,
                             struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (vlan->numqueues == MAX_MACVTAP_QUEUES)
                goto out;

        rcu_assign_pointer(q->vlan, vlan);
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);

        q->queue_index = vlan->numvtaps;

        file->private_data = q;
        list_add_tail(&q->next, &vlan->queue_list);

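/*
 * Take a queue out of the array of enabled taps: the last enabled tap is
 * moved into the vacated slot so that vlan->taps[0..numvtaps-1] stays dense.
 */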
static int macvtap_disable_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;
        struct macvtap_queue *nq;

        vlan = rtnl_dereference(q->vlan);

        if (vlan) {
                int index = q->queue_index;
                BUG_ON(index >= vlan->numvtaps);
                nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
                nq->queue_index = index;

                rcu_assign_pointer(vlan->taps[index], nq);
                RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        vlan = rtnl_dereference(q->vlan);

        BUG_ON(macvtap_disable_queue(q));

        RCU_INIT_POINTER(q->vlan, NULL);

        list_del_init(&q->next);

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                                               struct sk_buff *skb)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *tap = NULL;
        /* Access to taps array is protected by rcu, but access to numvtaps
         * isn't. Below we use it to look up a queue, but treat it as a hint
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
        int numvtaps = ACCESS_ONCE(vlan->numvtaps);

        /* Check if we can use flow to select a queue */
        rxq = skb_get_rxhash(skb);

        tap = rcu_dereference(vlan->taps[rxq % numvtaps]);

        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);

                while (unlikely(rxq >= numvtaps))
                        rxq -= numvtaps;

                tap = rcu_dereference(vlan->taps[rxq]);

        tap = rcu_dereference(vlan->taps[0]);

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
        int i, j = 0;

        list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
                list_del_init(&q->next);
                qlist[j++] = q;
                RCU_INIT_POINTER(q->vlan, NULL);

        for (i = 0; i < vlan->numvtaps; i++)
                RCU_INIT_POINTER(vlan->taps[i], NULL);
        BUG_ON(vlan->numvtaps);
        BUG_ON(vlan->numqueues);
        /* guarantee that any future macvtap_set_queue will fail */
        vlan->numvtaps = MAX_MACVTAP_QUEUES;

        for (--j; j >= 0; j--)
                sock_put(&qlist[j]->sk);

/*
 * Forward happens for data that gets sent from one macvlan
 * endpoint to another one in bridge mode. We just take
 * the skb and put it into the receive queue.
 */
static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);

        if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                goto drop;

        skb_queue_tail(&q->sk.sk_receive_queue, skb);
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return NET_RX_SUCCESS;

/*
 * Receive is for data from the external interface (lowerdev);
 * in the case of macvtap we can treat that the same way as
 * forward, which macvlan cannot.
 */
static int macvtap_receive(struct sk_buff *skb)
{
        skb_push(skb, ETH_HLEN);
        return macvtap_forward(skb->dev, skb);

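/*
 * Character device minors are handed out from minor_idr under minor_lock;
 * allocation starts at 1, so minor 0 is never used for a device node.
 */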
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
        int retval = -ENOMEM;

        mutex_lock(&minor_lock);
        retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
        if (retval >= 0) {
                vlan->minor = retval;
        } else if (retval == -ENOSPC) {
                printk(KERN_ERR "too many macvtap devices\n");
        }
        mutex_unlock(&minor_lock);
        return retval < 0 ? retval : 0;

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
        mutex_lock(&minor_lock);

        idr_remove(&minor_idr, vlan->minor);

        mutex_unlock(&minor_lock);

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
        struct net_device *dev = NULL;
        struct macvlan_dev *vlan;

        mutex_lock(&minor_lock);
        vlan = idr_find(&minor_idr, minor);

        mutex_unlock(&minor_lock);

static int macvtap_newlink(struct net *src_net,
                           struct net_device *dev,
                           struct nlattr *tb[],
                           struct nlattr *data[])
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        INIT_LIST_HEAD(&vlan->queue_list);

        /* Don't put anything that may fail after macvlan_common_newlink
         * because we can't undo what it does.
         */
        return macvlan_common_newlink(src_net, dev, tb, data,
                                      macvtap_receive, macvtap_forward);

static void macvtap_dellink(struct net_device *dev,
                            struct list_head *head)
{
        macvtap_del_queues(dev);
        macvlan_dellink(dev, head);

static void macvtap_setup(struct net_device *dev)
{
        macvlan_common_setup(dev);
        dev->tx_queue_len = TUN_READQ_SIZE;

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind = "macvtap",
        .setup = macvtap_setup,
        .newlink = macvtap_newlink,
        .dellink = macvtap_dellink,

static void macvtap_sock_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);

static void macvtap_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

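/*
 * Character device open: resolve the minor number to its macvlan net_device
 * and attach a freshly allocated queue (a struct sock plus socket glue) to it.
 */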
static int macvtap_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
        struct macvtap_queue *q;

        q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
                                             &macvtap_proto);

        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;

        q->sock.ops = &macvtap_socket_ops;
        sock_init_data(&q->sock, &q->sk);
        q->sk.sk_write_space = macvtap_sock_write_space;
        q->sk.sk_destruct = macvtap_sock_destruct;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
        q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

        /*
         * so far only KVM virtio_net uses macvtap; enable zero copy between
         * guest kernel and host kernel when the lower device supports zerocopy
         *
         * macvlan supports zerocopy iff the lower device supports zero
         * copy, so we don't have to look at the lower device directly.
         */
        if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);

        err = macvtap_set_queue(dev, file, q);

static int macvtap_release(struct inode *inode, struct file *file)
{
        struct macvtap_queue *q = file->private_data;

        macvtap_put_queue(q);

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
        struct macvtap_queue *q = file->private_data;
        unsigned int mask = POLLERR;

        poll_wait(file, &q->wq.wait, wait);

        if (!skb_queue_empty(&q->sk.sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

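/*
 * Allocate an skb with @linear bytes of linear data; the rest of @len is
 * accounted as paged data (skb->data_len) to be filled in later by the caller.
 */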
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
                                                size_t len, size_t linear,
                                                int noblock, int *err)
{
        struct sk_buff *skb;

        /* Under a page? Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err);

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

/* Set skb frags from iovec; this can move to core network code for reuse */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                                  int offset, size_t count)
{
        int len = iov_length(from, count) - offset;
        int copy = skb_headlen(skb);
        int size, offset1 = 0;

        /* Skip over from offset */
        while (count && (offset >= from->iov_len)) {
                offset -= from->iov_len;

        /* copy up to skb headlen */
        while (count && (copy > 0)) {
                size = min_t(unsigned int, copy, from->iov_len - offset);
                if (copy_from_user(skb->data + offset1, from->iov_base + offset,
                                   size))
                        return -EFAULT;

        struct page *page[MAX_SKB_FRAGS];

        unsigned long truesize;

        len = from->iov_len - offset;

        base = (unsigned long)from->iov_base + offset;
        size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
        if (i + size > MAX_SKB_FRAGS)
                return -EMSGSIZE;
        num_pages = get_user_pages_fast(base, size, 0, &page[i]);
        if (num_pages != size) {
                for (i = 0; i < num_pages; i++)
                        put_page(page[i]);
                return -EFAULT;
        }
        truesize = size * PAGE_SIZE;
        skb->data_len += len;

        skb->truesize += truesize;
        atomic_add(truesize, &skb->sk->sk_wmem_alloc);

        while (len) {
                int off = base & ~PAGE_MASK;
                int size = min_t(int, len, PAGE_SIZE - off);
                __skb_fill_page_desc(skb, i, page[i], off, size);
                skb_shinfo(skb)->nr_frags++;
                /* increase sk_wmem_alloc */

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
                                     struct virtio_net_hdr *vnet_hdr)
{
        unsigned short gso_type = 0;

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
                        break;

                if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        gso_type |= SKB_GSO_TCP_ECN;

                if (vnet_hdr->gso_size == 0)
                        return -EINVAL;

        if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
                                          vnet_hdr->csum_offset))
                        return -EINVAL;

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
                skb_shinfo(skb)->gso_type = gso_type;

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;

static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
                                   struct virtio_net_hdr *vnet_hdr)
{
        memset(vnet_hdr, 0, sizeof(*vnet_hdr));

        if (skb_is_gso(skb)) {
                struct skb_shared_info *sinfo = skb_shinfo(skb);

                /* This is a hint as to how much should be linear. */
                vnet_hdr->hdr_len = skb_headlen(skb);
                vnet_hdr->gso_size = sinfo->gso_size;
                if (sinfo->gso_type & SKB_GSO_TCPV4)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (sinfo->gso_type & SKB_GSO_UDP)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;

                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                        vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else
                vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                const struct iovec *iv, unsigned long total_len,
                                size_t count, int noblock)
{
        struct macvlan_dev *vlan;
        unsigned long len = total_len;

        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;

        bool zerocopy = false;

        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = q->vnet_hdr_sz;

                if (len < vnet_hdr_len)
                        goto err;

                err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
                                          sizeof(vnet_hdr));

                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
                                                        vnet_hdr.hdr_len)
                        vnet_hdr.hdr_len = vnet_hdr.csum_start +
                                                vnet_hdr.csum_offset + 2;

                if (vnet_hdr.hdr_len > len)
                        goto err;

        if (unlikely(len < ETH_HLEN))
                goto err;

        if (unlikely(count > UIO_MAXIOV))
                goto err;

        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
                zerocopy = true;

        /* Userspace may produce vectors with count greater than
         * MAX_SKB_FRAGS, so we need to linearize parts of the skb
         * to let the rest of the data fit in the frags.
         */
        if (count > MAX_SKB_FRAGS) {
                copylen = iov_length(iv, count - MAX_SKB_FRAGS);
                if (copylen < vnet_hdr_len)
                        copylen = 0;
                else
                        copylen -= vnet_hdr_len;
        }
        /* There are 256 bytes to be copied into the skb, so there is enough
         * room for expanding the skb head in case it is used.
         * The rest of the buffer is mapped from userspace.
         */
        if (copylen < vnet_hdr.hdr_len)
                copylen = vnet_hdr.hdr_len;
        if (!copylen)
                copylen = GOODCOPY_LEN;

        skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
                                vnet_hdr.hdr_len, noblock, &err);

        if (zerocopy)
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
        else
                err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
                                                   len);

        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);

        skb_probe_transport_header(skb, ETH_HLEN);

        vlan = rcu_dereference(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);

        return total_len;

err:
        vlan = rcu_dereference(q->vlan);
        if (vlan)
                vlan->dev->stats.tx_dropped++;

static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
                                 unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        ssize_t result = -ENOLINK;
        struct macvtap_queue *q = file->private_data;

        result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
                                  file->f_flags & O_NONBLOCK);

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
                                const struct sk_buff *skb,
                                const struct iovec *iv, int len)
{
        struct macvlan_dev *vlan;

        int vnet_hdr_len = 0;

        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                vnet_hdr_len = q->vnet_hdr_sz;
                if ((len -= vnet_hdr_len) < 0)
                        return -EINVAL;

                ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

                if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
                        return -EFAULT;

        copied = vnet_hdr_len;

        if (!vlan_tx_tag_present(skb))
                len = min_t(int, skb->len, len);

                veth.h_vlan_proto = htons(ETH_P_8021Q);
                veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                len = min_t(int, skb->len + VLAN_HLEN, len);

                copy = min_t(int, vlan_offset, len);
                ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);

                copy = min_t(int, sizeof(veth), len);
                ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);

        ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

        vlan = rcu_dereference(q->vlan);
        if (vlan)
                macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);

        return ret ? ret : copied;

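/*
 * Blocking read helper: wait on the socket's receive queue until a frame
 * arrives, returning early for non-blocking callers or on a pending signal.
 */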
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
                               const struct iovec *iv, unsigned long len,
                               int noblock)
{
        prepare_to_wait(sk_sleep(&q->sk), &wait,
                        TASK_INTERRUPTIBLE);

        /* Read frames from the queue */
        skb = skb_dequeue(&q->sk.sk_receive_queue);

        if (signal_pending(current)) {
                ret = -ERESTARTSYS;
                break;
        }
        /* Nothing to read, let's sleep */
        schedule();

        ret = macvtap_put_user(q, skb, iv, len);

        finish_wait(sk_sleep(&q->sk), &wait);

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
                                unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
        ssize_t len, ret = 0;

        len = iov_length(iv, count);

        ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        vlan = rtnl_dereference(q->vlan);

static void macvtap_put_vlan(struct macvlan_dev *vlan)

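/*
 * Handle the TUNSETQUEUE ioctl: attach (enable) or detach (disable) this
 * file's queue on the underlying device, per IFF_ATTACH_QUEUE/IFF_DETACH_QUEUE.
 */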
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;

        vlan = macvtap_get_vlan(q);

        if (flags & IFF_ATTACH_QUEUE)
                ret = macvtap_enable_queue(vlan->dev, file, q);
        else if (flags & IFF_DETACH_QUEUE)
                ret = macvtap_disable_queue(q);

        macvtap_put_vlan(vlan);

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        void __user *argp = (void __user *)arg;
        struct ifreq __user *ifr = argp;
        unsigned int __user *up = argp;

        int __user *sp = argp;

        switch (cmd) {
        case TUNSETIFF:
                /* ignore the name, just look at flags */
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
                    (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = u;

        case TUNGETIFF:
                vlan = macvtap_get_vlan(q);

                if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                macvtap_put_vlan(vlan);

        case TUNSETQUEUE:
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                ret = macvtap_ioctl_set_queue(file, u);

        case TUNGETFEATURES:
                if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
                             IFF_MULTI_QUEUE, up))
                        return -EFAULT;
                return 0;

        case TUNSETSNDBUF:
                if (get_user(u, up))
                        return -EFAULT;

                q->sk.sk_sndbuf = u;
                return 0;

        case TUNGETVNETHDRSZ:
                s = q->vnet_hdr_sz;
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETHDRSZ:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s < (int)sizeof(struct virtio_net_hdr))
                        return -EINVAL;

                q->vnet_hdr_sz = s;
                return 0;

        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                            TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;

                /* TODO: only accept frames with the features that
                 * got enabled for forwarded frames */
                if (!(q->flags & IFF_VNET_HDR))
                        return -EINVAL;

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
        .owner = THIS_MODULE,
        .open = macvtap_open,
        .release = macvtap_release,
        .aio_read = macvtap_aio_read,
        .aio_write = macvtap_aio_write,
        .poll = macvtap_poll,
        .llseek = no_llseek,
        .unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

        return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
                                m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len,
                           int flags)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;

        ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
                              flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
        .sendmsg = macvtap_sendmsg,
        .recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from a macvtap file. Returns error unless
 * the file is attached to a device. The returned object works like a packet
 * socket, it can be used for sock_sendmsg/sock_recvmsg. The caller is
 * responsible for holding a reference to the file for as long as the socket
 * is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
        struct macvtap_queue *q;

        if (file->f_op != &macvtap_fops)
                return ERR_PTR(-EINVAL);
        q = file->private_data;
        if (!q)
                return ERR_PTR(-EBADFD);
        return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

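/*
 * Rough usage sketch (illustrative only, not code from this driver): an
 * in-kernel user such as vhost-net, already holding a reference on @file,
 * could transmit through the returned socket roughly like this; all names
 * below are assumptions for the sake of the example.
 *
 *	struct socket *sock = macvtap_get_socket(file);
 *	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	msg.msg_iov = iov;
 *	msg.msg_iovlen = iovcnt;
 *	err = sock->ops->sendmsg(NULL, sock, &msg, iov_length(iov, iovcnt));
 */
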
static int macvtap_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct macvlan_dev *vlan;
        struct device *classdev;

        if (dev->rtnl_link_ops != &macvtap_link_ops)
                return NOTIFY_DONE;

        vlan = netdev_priv(dev);

        switch (event) {
        case NETDEV_REGISTER:
                /* Create the device node here after the network device has
                 * been registered but before register_netdevice has
                 * finished running.
                 */
                err = macvtap_get_minor(vlan);
                if (err)
                        return notifier_from_errno(err);

                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                classdev = device_create(macvtap_class, &dev->dev, devt,
                                         dev, "tap%d", dev->ifindex);
                if (IS_ERR(classdev)) {
                        macvtap_free_minor(vlan);
                        return notifier_from_errno(PTR_ERR(classdev));
                }
                break;
        case NETDEV_UNREGISTER:
                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                device_destroy(macvtap_class, devt);
                macvtap_free_minor(vlan);
                break;

static struct notifier_block macvtap_notifier_block __read_mostly = {
        .notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
        err = alloc_chrdev_region(&macvtap_major, 0,
                                  MACVTAP_NUM_DEVS, "macvtap");

        cdev_init(&macvtap_cdev, &macvtap_fops);
        err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);

        macvtap_class = class_create(THIS_MODULE, "macvtap");
        if (IS_ERR(macvtap_class)) {
                err = PTR_ERR(macvtap_class);

        err = register_netdevice_notifier(&macvtap_notifier_block);

        err = macvlan_link_register(&macvtap_link_ops);

        unregister_netdevice_notifier(&macvtap_notifier_block);

        class_unregister(macvtap_class);

        cdev_del(&macvtap_cdev);

        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);

module_init(macvtap_init);

static void macvtap_exit(void)
{
        rtnl_link_unregister(&macvtap_link_ops);
        unregister_netdevice_notifier(&macvtap_notifier_block);
        class_unregister(macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");