/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
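/* e.g. "modprobe ixgbevf debug=16" turns on all message types; the
 * default of -1 selects DEFAULT_MSG_ENABLE (driver, probe and link
 * messages only).
 */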
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
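		/* each VTIVAR register covers two queues: byte 0 holds the
		 * Rx cause for the even queue, byte 1 its Tx cause, and
		 * bytes 2-3 the same for the odd queue.  E.g. Rx queue 3
		 * (direction 0) lands in bits 23:16 of VTIVAR(1), since
		 * 16 * (3 & 1) + 8 * 0 = 16.
		 */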
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
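/* DESC_NEEDED is the per-skb worst case: roughly one descriptor per
 * page fragment plus a few more for the linear head, an optional
 * context descriptor and some slack.
 */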
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
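	/* only wake the queue once at least two worst-case frames' worth
	 * of descriptors are free, so a freshly woken queue is not
	 * stopped again immediately
	 */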
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				goto no_buffers;

			bi->skb = skb;

			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(rx_ring->dev, "Rx DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	adapter->alloc_rx_buff_failed++;
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(adapter->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;
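	/* e.g. a NAPI budget of 64 across two Rx rings gives each ring a
	 * per-ring budget of 32
	 */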
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->bp_cleaned += found;
		else
			ring->bp_misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
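	/* q_vector->itr is kept in units of 0.25 usec, so the shift above
	 * converts it to usecs: e.g. IXGBE_20K_ITR (200) is a 50 usec
	 * interval, i.e. 20000 interrupts/sec
	 */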
	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);
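		/* e.g. stepping from IXGBE_8K_ITR (500) toward
		 * IXGBE_100K_ITR (40) yields (10 * 40 * 500) /
		 * ((9 * 40) + 500) = 232, so the rate converges over
		 * several interrupts instead of jumping
		 */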
		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
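	/* e.g. 4 Rx queues over 2 q_vectors: i == 0 takes
	 * DIV_ROUND_UP(4, 2) = 2 queues, i == 1 takes the remaining
	 * DIV_ROUND_UP(2, 1) = 2; Tx queues are spread the same way below
	 */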
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
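	/* BSIZEPKT is programmed in 1 KB granules (hence the shift by
	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT, i.e. 10): e.g. a 2048 byte buffer
	 * is written as 2
	 */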
	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}
static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
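
/* The two macros below maintain 64-bit software counters on top of hardware
 * counters that are only 32 (or 36) bits wide and wrap silently.  A minimal
 * sketch of the idea, with illustrative values: if the previous 32-bit read
 * was 0xFFFFFFF0 and the current read is 0x00000010, current < last, so the
 * counter must have wrapped once and 0x100000000 (2^32) is added to the
 * 64-bit accumulator; the accumulator's low 32 bits are then replaced with
 * the fresh hardware value.  The 36-bit variant does the same with a 2^36
 * step after stitching the MSB/LSB register pair together.
 */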
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i]->hw_csum_rx_good;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
		adapter->rx_ring[i]->hw_csum_rx_good = 0;
	}
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev,
		"Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
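
/* Sizing sketch for the allocation above, assuming the driver's default of
 * IXGBEVF_DEFAULT_TXD (1024) descriptors and the 16-byte advanced descriptor
 * layout: 1024 * sizeof(union ixgbe_adv_tx_desc) = 16384 bytes, already a
 * multiple of 4096, so ALIGN() leaves it unchanged; a hypothetical
 * 1000-entry ring (16000 bytes) would be padded up to 16384.
 */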
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
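
/* Field packing sketch for the context descriptor built in ixgbevf_tso()
 * (values illustrative): for a TCP flow with a 20-byte L4 header and an MSS
 * of 1448, mss_l4len_idx becomes (20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 * (1448 << IXGBE_ADVTXD_MSS_SHIFT) | (1 << IXGBE_ADVTXD_IDX_SHIFT); the
 * hardware reads L4LEN and MSS back out of those bit ranges when it
 * segments the payload.
 */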
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
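
/* ixgbevf_tx_csum() returns true only for CHECKSUM_PARTIAL skbs, so the
 * caller adds IXGBE_TX_FLAGS_CSUM (and hence TXSM in the olinfo field) only
 * when the stack actually asked for checksum insertion; note that a context
 * descriptor is written either way.
 */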
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;
			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, unsigned int first, u32 paylen,
			     u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
}
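
/* The stop/wake pair below is the usual netdev backpressure pattern: the
 * queue is stopped first, a memory barrier makes the stop visible before
 * the free-descriptor count is re-read, and the queue is restarted if the
 * cleanup path freed enough entries in the meantime.
 */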
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
			 first, skb->len, hdr_len);

	writel(tx_ring->next_to_use, tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
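
/* Descriptor budgeting sketch for the count computed above, assuming the
 * usual 16 KB IXGBE_MAX_DATA_PER_TXD and 4 KB pages (so the #else branch is
 * taken): a linear skb with two page-sized frags needs 1 + 2 = 3 data
 * descriptors, and transmission proceeds only if 3 + 3 slots are free - the
 * extra three covering one context descriptor plus a two-slot gap that
 * keeps the tail from touching the head.
 */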
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
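
/* Worked example for the bounds check above: with ETH_HLEN (14) and
 * ETH_FCS_LEN (4), a requested MTU of 9000 gives max_frame = 9018.  That is
 * rejected for a legacy 82599 VF stuck on mailbox API 1.0, but accepted once
 * the PF negotiates api_11 or the VF is an X540, where the limit becomes
 * IXGBE_MAX_JUMBO_FRAME_SIZE (assumed here to be the 82599 jumbo maximum of
 * 9728 bytes).
 */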
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
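
/* The do/while loops above are the u64_stats_sync read pattern: on 32-bit
 * hosts a 64-bit counter cannot be read atomically, so the reader snapshots
 * bytes and packets and retries if the sequence count shows a writer updated
 * the ring statistics in between, yielding a consistent pair without locking
 * the hot Tx/Rx paths.
 */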
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
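
/* Recovery flow implemented by the three callbacks above: the PCI AER core
 * first calls error_detected (detach and quiesce), then, if a reset is
 * requested, slot_reset (re-enable the device and reset the VF), and
 * finally resume (bring the interface back up and reattach it).
 */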
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */