/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define LINKCHANGE_INT		(2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

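/* Work item that pushes the current rx mode to the RNDIS filter.
 * Run from the shared workqueue because the filter request can sleep,
 * while ndo_set_rx_mode is called in atomic context.
 */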
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

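/* Append a per-packet info (PPI) record to the RNDIS message and return
 * a pointer to it; the caller fills in the payload that follows the
 * rndis_per_packet_info header.
 */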
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

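/* Hash-based transmit queue selection: map the skb flow hash through the
 * host-supplied send indirection table to a vRSS subchannel, falling back
 * to queue 0 if that channel is not open.
 */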
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

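/* Fill hv_page_buffer slots (pfn/offset/len) describing a buffer that may
 * span multiple pages; returns the number of slots used.
 */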
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

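/* Build the page buffer array for a send: the RNDIS header first, then
 * the skb linear data, then each skb fragment.
 */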
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

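/* Classify the packet as IPv4/IPv6 TCP/UDP for checksum and LSO offload
 * setup; also returns the transport header offset via *trans_off.
 */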
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6)))
		return ret_val;

	*trans_off = skb_transport_offset(skb);

	if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

	return ret_val;
}

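/* Transmit entry point: build an RNDIS packet message (plus optional hash,
 * VLAN, checksum and LSO per-packet info) in the skb headroom and hand it
 * to the netvsc layer for the VMBus send.
 */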
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */
check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	isvlan = skb->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
							 ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
						ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);

drop:
	if (ret == 0) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Handle link change statuses only */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	net_device = hv_get_drvdata(device_obj);
	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

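/* Allocate and fill an skb for a received packet, copying the data out of
 * the host receive buffer and applying checksum/VLAN metadata.
 */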
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info,
				void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				void **data,
				struct ndis_tcp_ip_checksum_info *csum_info,
				struct vmbus_channel *channel,
				u16 vlan_tci)
{
	struct net_device *net;
	struct net_device_context *net_device_ctx;
	struct sk_buff *skb;
	struct sk_buff *vf_skb;
	struct netvsc_stats *rx_stats;
	struct netvsc_device *netvsc_dev = hv_get_drvdata(device_obj);
	u32 bytes_recvd = packet->total_data_buflen;
	int ret = 0;

	net = netvsc_dev->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	if (READ_ONCE(netvsc_dev->vf_inject)) {
		atomic_inc(&netvsc_dev->vf_use_cnt);
		if (!READ_ONCE(netvsc_dev->vf_inject)) {
			/*
			 * We raced; just move on.
			 */
			atomic_dec(&netvsc_dev->vf_use_cnt);
			goto vf_injection_done;
		}

		/*
		 * Inject this packet into the VF interface.
		 * On Hyper-V, multicast and broadcast packets
		 * are only delivered on the synthetic interface
		 * (after subjecting these to policy filters on
		 * the host). Deliver these via the VF interface
		 * in the guest.
		 */
		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
					       csum_info, *data, vlan_tci);
		if (vf_skb != NULL) {
			++netvsc_dev->vf_netdev->stats.rx_packets;
			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
			netif_receive_skb(vf_skb);
		} else {
			++net->stats.rx_dropped;
			ret = NVSP_STAT_FAIL;
		}
		atomic_dec(&netvsc_dev->vf_use_cnt);
		return ret;
	}

vf_injection_done:
	net_device_ctx = netdev_priv(net);
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}
	skb_record_rx_queue(skb,
			    channel->offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

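/* Change the number of vRSS channels. The synthetic device has to be torn
 * down and re-added with the new channel count; on failure the same path
 * is retried with the original count.
 */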
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return ret;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return ret;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

do_set:
	nvdev->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = hv_get_drvdata(dev);

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

out:
	netvsc_open(net);

	return ret;

recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

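/* An MTU change requires tearing down and re-adding the synthetic device,
 * since the buffer sizes negotiated with the host depend on the MTU.
 */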
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);

	return ret;
}

static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_errors;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}

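/* Set a new MAC address: update the net_device first, then push the
 * address to the host via RNDIS, rolling back on failure.
 */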
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels	= netvsc_get_channels,
	.set_channels	= netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT, when carrier is
 * present, send a GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		return;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		return;

	rtnl_lock();

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add_tail(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

static void netvsc_notify_peers(struct work_struct *wrk)
{
	struct garp_wrk *gwrk;

	gwrk = container_of(wrk, struct garp_wrk, dwrk);

	netdev_notify_peers(gwrk->netdev);

	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
}

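/* Find the netvsc device that shares a MAC address with the given VF
 * interface; used to pair a VF NIC with its synthetic counterpart.
 */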
static struct netvsc_device *get_netvsc_device(char *mac)
{
	struct net_device *dev;
	struct net_device_context *netvsc_ctx = NULL;
	int rtnl_locked;

	rtnl_locked = rtnl_trylock();

	for_each_netdev(&init_net, dev) {
		if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
			if (dev->netdev_ops != &device_ops)
				continue;
			netvsc_ctx = netdev_priv(dev);
			break;
		}
	}
	if (rtnl_locked)
		rtnl_unlock();

	if (netvsc_ctx == NULL)
		return NULL;

	return hv_get_drvdata(netvsc_ctx->device_ctx);
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == NULL || eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
	if (netvsc_dev == NULL)
		return NOTIFY_DONE;

	netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);
	netvsc_dev->vf_netdev = vf_netdev;

	return NOTIFY_OK;
}

static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
	struct net_device_context *net_device_ctx;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);

	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
		return NOTIFY_DONE;

	netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name);
	net_device_ctx = netdev_priv(netvsc_dev->ndev);
	netvsc_dev->vf_inject = true;

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(net_device_ctx->device_ctx);

	/*
	 * notify the host to switch the data path.
	 */
	netvsc_switch_datapath(netvsc_dev, true);
	netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n",
		    vf_netdev->name);

	netif_carrier_off(netvsc_dev->ndev);

	/*
	 * Now notify peers. We are scheduling work to
	 * notify peers; take a reference to prevent
	 * the VF interface from vanishing.
	 */
	atomic_inc(&netvsc_dev->vf_use_cnt);
	net_device_ctx->gwrk.netdev = vf_netdev;
	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
	schedule_work(&net_device_ctx->gwrk.dwrk);

	return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);

	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
		return NOTIFY_DONE;

	netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name);
	net_device_ctx = netdev_priv(netvsc_dev->ndev);
	netvsc_dev->vf_inject = false;
	/*
	 * Wait for currently active users to
	 * drain out.
	 */

	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
		udelay(50);
	netvsc_switch_datapath(netvsc_dev, false);
	netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n",
		    vf_netdev->name);
	rndis_filter_close(net_device_ctx->device_ctx);
	netif_carrier_on(netvsc_dev->ndev);
	/*
	 * Notify peers.
	 */
	atomic_inc(&netvsc_dev->vf_use_cnt);
	net_device_ctx->gwrk.netdev = netvsc_dev->ndev;
	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
	schedule_work(&net_device_ctx->gwrk.dwrk);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct netvsc_device *netvsc_dev;
	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;

	if (eth_ops == &ethtool_ops)
		return NOTIFY_DONE;

	netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
	if (netvsc_dev == NULL)
		return NOTIFY_DONE;
	netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n",
		    vf_netdev->name);

	netvsc_dev->vf_netdev = NULL;
	module_put(THIS_MODULE);

	return NOTIFY_OK;
}

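/* Probe: allocate the net_device, set up per-cpu stats and work items,
 * create the RNDIS device (which also brings up the netvsc channel) and
 * register with the network stack.
 */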
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);
	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;

	net->hw_features = NETVSC_HW_FEATURES;
	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	netvsc_init_settings(net);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);