/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/route.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"
#define RING_SIZE_MIN		64
#define LINKCHANGE_INT		(2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

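/* Deferred work: reprogram the RNDIS receive filter to match the
 * interface's current promiscuous/multicast mode. Scheduled from
 * netvsc_set_multicast_list() so the RNDIS request runs outside of
 * atomic context.
 */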
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (!rdev)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

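/* Append one per-packet-info (PPI) element to the RNDIS message and
 * return a pointer to it; the caller fills in the type-specific payload
 * at ppi->ppi_offset.
 */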
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

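/* ndo_select_queue: map the skb's flow hash through the host-supplied
 * send indirection table (send_table) to pick a transmit queue, falling
 * back to queue 0 if the selected channel is not open.
 */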
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

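/* Describe a (possibly compound) page range as one or more hv_page_buffer
 * entries, splitting at page boundaries; returns the number of entries
 * written to @pb.
 */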
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

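/* Build the page-buffer list for one transmit: the RNDIS header, the
 * skb's linear data and each skb fragment, in that order; returns the
 * number of slots used.
 */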
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}

	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused space at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}

	return pages;
}

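/* Estimate how many page-buffer slots the skb will need: linear data
 * plus all fragments, each rounded up to page boundaries.
 */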
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots, frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

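/* Classify the packet's L3/L4 protocol (IPv4/IPv6 with TCP/UDP) and
 * report the transport-header offset for use by the checksum/LSO PPIs.
 */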
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

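/* ndo_start_xmit: build the RNDIS packet (header plus optional hash,
 * VLAN, LSO and checksum PPIs) in the skb headroom, describe it as a
 * page-buffer list, and hand it to netvsc_send() for transmission over
 * VMBus.
 */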
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the RNDIS
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT pages
	 * in a single packet; if the skb is scattered across more
	 * pages than that, try linearizing it.
	 */

	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the RNDIS header in the skb headroom; skb->cb is used
	 * for the hv_netvsc_packet structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);

	/*
	 * Set up the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (net_trans_info & (INFO_IPV4 << 16)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_trans_info & INFO_TCP) {
			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			if (net_trans_info & (INFO_IPV4 << 16))
				csum_info->transmit.is_ipv4 = 1;
			else
				csum_info->transmit.is_ipv6 = 1;

			csum_info->transmit.tcp_checksum = 1;
			csum_info->transmit.tcp_header_offset = hdr_offset;
		} else {
			/* UDP checksum (and other) offload is not supported. */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0)) {
		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
		return NETDEV_TX_OK;
	}

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate +
				 indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

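/* Allocate an skb for a received packet and copy the host-provided data
 * into it, propagating checksum-offload results and any VLAN tag.
 */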
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct hv_netvsc_packet *packet,
					     struct ndis_tcp_ip_checksum_info *csum_info,
					     void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed
	 * to by hv_netvsc_packet cannot be deallocated.
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 void **data,
			 struct ndis_tcp_ip_checksum_info *csum_info,
			 struct vmbus_channel *channel,
			 u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb,
				    channel->offermsg.offer.sub_channel_index);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done with it.
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

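/* Reject set_settings requests that try to change anything other than
 * speed, duplex or advertising: with those fields (and the fixed
 * PORT_OTHER port) cleared, the command must match an all-defaults
 * ethtool_cmd.
 */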
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
			rx_multicast = rx_stats->multicast + rx_stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
		t->multicast	+= rx_multicast;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_dropped;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}

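/* ndo_set_mac_address: push the new MAC to the host via RNDIS and roll
 * back to the previous address if the host rejects it.
 */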
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(netvsc_stats);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	const void *nds = &ndc->eth_stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously, we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * already present, send a GARP packet to network peers with
 * netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second; avoid doing reconfig more frequently. Handle
		 * wrap-around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

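/* Walk the net devices in init_net and return the netvsc device whose
 * permanent MAC address matches; used to pair a VF with its synthetic
 * counterpart. Called from the netdev notifier, which runs under RTNL.
 */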
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * Notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);
	net_device = ndev_ctx->nvdev;

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed.
	 */
	rndis_filter_device_remove(dev);

	hv_set_drvdata(dev, NULL);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid a VLAN dev with the same MAC registering as a VF */
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		return NOTIFY_DONE;

	/* Avoid a bonding master dev with the same MAC registering as a VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);