ixgbevf: add support for X550 VFs
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c (cascardo/linux.git)
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf]  = &ixgbevf_X540_vf_info,
        [board_X550_vf]  = &ixgbevf_X550_vf_info,
        [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

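/**
 * ixgbevf_remove_adapter - flag surprise removal of the adapter
 * @hw: pointer to the HW structure
 *
 * Clears the cached register base so further MMIO accesses are
 * short-circuited, logs the removal, and schedules the watchdog task
 * (once initialization has completed) so the driver can react to it.
 **/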
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
        if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
                schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_VFSTATUS) {
                ixgbevf_remove_adapter(hw);
                return;
        }
        value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbevf_remove_adapter(hw);
}

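/**
 * ixgbevf_read_reg - read a device register with removal detection
 * @hw: pointer to the HW structure
 * @reg: register offset to read
 *
 * Returns the register value, or IXGBE_FAILED_READ_REG if the adapter
 * has been removed.  A failed read triggers a check of the status
 * register to detect surprise removal.
 **/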
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);
        return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}

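/**
 * ixgbevf_unmap_and_free_tx_resource - free a Tx buffer and its DMA mapping
 * @tx_ring: ring the buffer belongs to
 * @tx_buffer: buffer to free
 *
 * Frees the skb (if any), unmaps the associated DMA (single mapping for a
 * header, page mapping for paged data) and clears the buffer fields so it
 * can be reused by the transmit path.
 **/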
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                        struct ixgbevf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = tx_ring->count / 2;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        skb_mark_napi_id(skb, &q_vector->napi);

        if (ixgbevf_qv_busy_polling(q_vector)) {
                netif_receive_skb(skb);
                /* exit early if we busy polled */
                return;
        }
#endif /* CONFIG_NET_RX_BUSY_POLL */

        napi_gro_receive(&q_vector->napi, skb);
}

/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* if IP and error */
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
                return;

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

                if (test_bit(vid & VLAN_VID_MASK, active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it returns true
 * indicating that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
                               union ixgbe_adv_rx_desc *rx_desc)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

        if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
                return false;

        return true;
}

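/**
 * ixgbevf_alloc_mapped_page - allocate and DMA-map a page for an Rx buffer
 * @rx_ring: ring the buffer will be used on
 * @bi: Rx buffer info to fill in
 *
 * Returns true if the buffer already has a page or a new page was
 * successfully allocated and mapped, false on allocation or mapping
 * failure (the relevant rx_stats counter is incremented).
 **/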
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
                                      struct ixgbevf_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma = bi->dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = dev_alloc_page();
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0,
                           PAGE_SIZE, DMA_FROM_DEVICE);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);

                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;

        return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     u16 cleaned_count)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        unsigned int i = rx_ring->next_to_use;

        /* nothing to do or no valid netdev defined */
        if (!cleaned_count || !rx_ring->netdev)
                return;

        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
                        break;

                /* Refresh the desc even if pkt_addr didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                ixgbevf_write_tail(rx_ring, i);
        }
}

/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbevf specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
                              struct sk_buff *skb)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;

        /* it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the lowmem pool per
         * alloc_page(GFP_ATOMIC)
         */
        va = skb_frag_address(frag);

        /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
        pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

        /* align pull length to size of long to optimize memcpy performance */
        skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

        /* update all of the pointers */
        skb_frag_size_sub(frag, pull_len);
        frag->page_offset += pull_len;
        skb->data_len -= pull_len;
        skb->tail += pull_len;
}

/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                                    union ixgbe_adv_rx_desc *rx_desc,
                                    struct sk_buff *skb)
{
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbevf_test_staterr(rx_desc,
                                          IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                struct net_device *netdev = rx_ring->netdev;

                if (!(netdev->features & NETIF_F_RXALL)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
        }

        /* place header in linear portion of buffer */
        if (skb_is_nonlinear(skb))
                ixgbevf_pull_tail(rx_ring, skb);

        /* if skb_pad returns an error the skb was freed */
        if (unlikely(skb->len < 60)) {
                int pad_len = 60 - skb->len;

                if (skb_pad(skb, pad_len))
                        return true;
                __skb_put(skb, pad_len);
        }

        return false;
}

/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
                                  struct ixgbevf_rx_buffer *old_buff)
{
        struct ixgbevf_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
        new_buff->page_offset = old_buff->page_offset;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                         new_buff->page_offset,
                                         IXGBEVF_RX_BUFSZ,
                                         DMA_FROM_DEVICE);
}

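/* ixgbevf_page_is_reserved - check whether a page can be reused
 *
 * Pages from a remote NUMA node or taken from the pfmemalloc reserves
 * must not be recycled back onto the Rx ring.
 */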
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}

/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct ixgbevf_rx_buffer *rx_buffer,
                                union ixgbe_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
        struct page *page = rx_buffer->page;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

        if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
                unsigned char *va = page_address(page) + rx_buffer->page_offset;

                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

                /* page is not reserved, we can reuse buffer as is */
                if (likely(!ixgbevf_page_is_reserved(page)))
                        return true;

                /* this page cannot be reused so discard it */
                put_page(page);
                return false;
        }

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        rx_buffer->page_offset, size, truesize);

        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
                return false;

        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;

        if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
                return false;

#endif
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
        atomic_inc(&page->_count);

        return true;
}

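/* ixgbevf_fetch_rx_buffer - fetch the current Rx buffer and build/extend skb
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: descriptor describing the buffer written by hardware
 * @skb: skb being built, or NULL if a new one must be allocated
 *
 * Allocates an skb for the first buffer of a frame if needed, syncs the
 * page for CPU use and either recycles the half-page back to the ring or
 * unmaps it when it cannot be reused.  Returns the skb, or NULL if the
 * skb allocation failed.
 */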
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
                                               union ixgbe_adv_rx_desc *rx_desc,
                                               struct sk_buff *skb)
{
        struct ixgbevf_rx_buffer *rx_buffer;
        struct page *page;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);

        if (likely(!skb)) {
                void *page_addr = page_address(page) +
                                  rx_buffer->page_offset;

                /* prefetch first cache line of first page */
                prefetch(page_addr);
#if L1_CACHE_BYTES < 128
                prefetch(page_addr + L1_CACHE_BYTES);
#endif

                /* allocate a skb to store the frags */
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                IXGBEVF_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return NULL;
                }

                /* we will be copying header into skb->data in
                 * pskb_may_pull so it is in our interest to prefetch
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
        }

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      IXGBEVF_RX_BUFSZ,
                                      DMA_FROM_DEVICE);

        /* pull page into skb */
        if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
                /* hand second half of page back to the ring */
                ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                /* we are not reusing the buffer so unmap it */
                dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        /* clear contents of buffer_info */
        rx_buffer->dma = 0;
        rx_buffer->page = NULL;

        return skb;
}

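/* ixgbevf_irq_enable_queues - enable interrupts for the given queue mask
 * @adapter: board private structure
 * @qmask: bitmask of queue vectors to enable in VTEIMS
 */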
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

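/* ixgbevf_clean_rx_irq - clean completed descriptors from an Rx ring
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to clean
 * @budget: maximum number of packets to process
 *
 * Processes received frames up to the given budget, refills the ring in
 * batches and hands completed skbs to the stack.  Returns the number of
 * packets cleaned.
 */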
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
        struct sk_buff *skb = rx_ring->skb;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

                if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * RXD_STAT_DD bit is set
                 */
                rmb();

                /* retrieve a buffer from the ring */
                skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

                /* exit if we failed to retrieve a buffer */
                if (!skb)
                        break;

                cleaned_count++;

                /* fetch next buffer in frame if non-eop */
                if (ixgbevf_is_non_eop(rx_ring, rx_desc))
                        continue;

                /* verify the packet layout is correct */
                if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;

                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
                    skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                /* populate checksum, VLAN, and protocol */
                ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

                ixgbevf_rx_skb(q_vector, skb);

                /* reset skb pointer */
                skb = NULL;

                /* update budget accounting */
                total_rx_packets++;
        }

        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!ixgbevf_qv_lock_napi(q_vector))
                return budget;
#endif

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
                                                        per_ring_budget)
                                   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
#endif

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        /*
         * set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
        struct ixgbevf_q_vector *q_vector =
                        container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring  *ring;
        int found = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return LL_FLUSH_FAILED;

        if (!ixgbevf_qv_lock_poll(q_vector))
                return LL_FLUSH_BUSY;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
                        ring->stats.misses++;
#endif
                if (found)
                        break;
        }

        ixgbevf_qv_unlock_poll(q_vector);

        return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;
                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        /* tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        /* rx or rx/tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                /* add q_vector eims value to global eims_enable_mask */
                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
{
        int bytes = ring_container->total_bytes;
        int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
        u8 itr_setting = ring_container->itr;

        if (packets == 0)
                return;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > 10)
                        itr_setting = low_latency;
                break;
        case low_latency:
                if (bytes_perint > 20)
                        itr_setting = bulk_latency;
                else if (bytes_perint <= 10)
                        itr_setting = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= 20)
                        itr_setting = low_latency;
                break;
        }

        /* clear work counters since we have the values we need */
        ring_container->total_bytes = 0;
        ring_container->total_packets = 0;

        /* write updated itr to ring container */
        ring_container->itr = itr_setting;
}

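/* ixgbevf_set_itr - pick a new ITR value for a queue vector
 * @q_vector: structure containing interrupt and ring information
 *
 * Combines the Tx and Rx latency classifications from ixgbevf_update_itr,
 * applies exponential smoothing to the new value and writes it to the
 * VTEITR register when it changes.
 */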
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
        u32 new_itr = q_vector->itr;
        u8 current_itr;

        ixgbevf_update_itr(q_vector, &q_vector->tx);
        ixgbevf_update_itr(q_vector, &q_vector->rx);

        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
                new_itr = IXGBE_8K_ITR;
                break;
        }

        if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
                new_itr = (10 * new_itr * q_vector->itr) /
                          ((9 * new_itr) + q_vector->itr);

                /* save the algorithm value here */
                q_vector->itr = new_itr;

                ixgbevf_write_eitr(q_vector);
        }
}

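/* ixgbevf_msix_other - MSI-X handler for the other (non-queue) vector
 * @irq: unused
 * @data: pointer to our adapter struct
 *
 * Flags that the link status needs to be re-read, kicks the watchdog
 * timer and re-enables the other-cause interrupt.
 */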
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;

        hw->mac.get_link_status = 1;

        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies);

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;

        /* EIAM disabled interrupts (on this vector) for us */
        if (q_vector->rx.ring || q_vector->tx.ring)
                napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

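/* map_vector_to_rxq - add an Rx ring to the ring list of a queue vector
 * @a: board private structure
 * @v_idx: index of the queue vector
 * @r_idx: index of the Rx ring to map
 *
 * The ring is linked onto the vector's Rx list and the per-vector ring
 * count is updated; map_vector_to_txq below does the same for Tx rings.
 */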
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->rx_ring[r_idx]->next = q_vector->rx.ring;
        q_vector->rx.ring = a->rx_ring[r_idx];
        q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->tx_ring[t_idx]->next = q_vector->tx.ring;
        q_vector->tx.ring = a->tx_ring[t_idx];
        q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "TxRx", ri++);
                        ti++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "rx", ri++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt "
                               "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
        /* This failure is non-recoverable - it indicates the system is
         * out of MSIX vector resources and the VF driver cannot run
         * without them.  Set the number of msix vectors to zero
         * indicating that not enough can be allocated.  The error
         * will be returned to the user indicating device open failed.
         * Any further attempts to force the driver to open will also
         * fail.  The only way to recover is to unload the driver and
         * reload it again.  If the system has recovered some MSIX
         * vectors then it may succeed.
         */
        adapter->num_msix_vectors = 0;
        return err;
}

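/* ixgbevf_reset_q_vectors - detach all rings from their queue vectors
 * @adapter: board private structure
 *
 * Clears the ring lists and ring counts on every queue vector so the
 * rings can be remapped the next time interrupts are requested.
 */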
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
                q_vector->tx.count = 0;
        }
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw,
                       "request_irq failed, Error %d\n", err);

        return err;
}

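/* ixgbevf_free_irq - release the MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees the other-cause vector first, then every queue vector that
 * actually had an IRQ requested, and finally unmaps the rings from
 * their vectors.
 */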
1379 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1380 {
1381         int i, q_vectors;
1382
1383         q_vectors = adapter->num_msix_vectors;
1384         i = q_vectors - 1;
1385
1386         free_irq(adapter->msix_entries[i].vector, adapter);
1387         i--;
1388
1389         for (; i >= 0; i--) {
1390                 /* free only the irqs that were actually requested */
1391                 if (!adapter->q_vector[i]->rx.ring &&
1392                     !adapter->q_vector[i]->tx.ring)
1393                         continue;
1394
1395                 free_irq(adapter->msix_entries[i].vector,
1396                          adapter->q_vector[i]);
1397         }
1398
1399         ixgbevf_reset_q_vectors(adapter);
1400 }
1401
1402 /**
1403  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1404  * @adapter: board private structure
1405  **/
1406 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1407 {
1408         struct ixgbe_hw *hw = &adapter->hw;
1409         int i;
1410
1411         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1412         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1413         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1414
1415         IXGBE_WRITE_FLUSH(hw);
1416
1417         for (i = 0; i < adapter->num_msix_vectors; i++)
1418                 synchronize_irq(adapter->msix_entries[i].vector);
1419 }
1420
1421 /**
1422  * ixgbevf_irq_enable - Enable default interrupt generation settings
1423  * @adapter: board private structure
1424  **/
1425 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1426 {
1427         struct ixgbe_hw *hw = &adapter->hw;
1428
1429         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1430         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1431         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1432 }
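
/* The three writes above arm the same bit mask in the auto-mask (VTEIAM),
 * auto-clear (VTEIAC) and mask-set (VTEIMS) registers, so every cause in
 * eims_enable_mask is unmasked and will auto-clear/auto-mask when it fires.
 * A sketch of how a single vector would typically be re-armed later, for
 * example after a NAPI poll completes (illustrative only):
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, 1 << q_vector->v_idx);
 */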
1433
1434 /**
1435  * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1436  * @adapter: board private structure
1437  * @ring: structure containing ring specific data
1438  *
1439  * Configure the Tx descriptor ring after a reset.
1440  **/
1441 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1442                                       struct ixgbevf_ring *ring)
1443 {
1444         struct ixgbe_hw *hw = &adapter->hw;
1445         u64 tdba = ring->dma;
1446         int wait_loop = 10;
1447         u32 txdctl = IXGBE_TXDCTL_ENABLE;
1448         u8 reg_idx = ring->reg_idx;
1449
1450         /* disable queue to avoid issues while updating state */
1451         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1452         IXGBE_WRITE_FLUSH(hw);
1453
1454         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1455         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1456         IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1457                         ring->count * sizeof(union ixgbe_adv_tx_desc));
1458
1459         /* disable head writeback */
1460         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1461         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1462
1463         /* enable relaxed ordering */
1464         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1465                         (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1466                          IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1467
1468         /* reset head and tail pointers */
1469         IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1470         IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1471         ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1472
1473         /* reset ntu and ntc to place SW in sync with hardware */
1474         ring->next_to_clean = 0;
1475         ring->next_to_use = 0;
1476
1477         /* To avoid issues, WTHRESH + PTHRESH should always be less than
1478          * or equal to the number of on-chip descriptors, which is
1479          * currently 40.
1480          */
1481         txdctl |= (8 << 16);    /* WTHRESH = 8 */
1482
1483         /* Setting PTHRESH to 32 improves performance */
1484         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
1485                   32;          /* PTHRESH = 32 */
1486
1487         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1488
1489         /* poll to verify queue is enabled */
1490         do {
1491                 usleep_range(1000, 2000);
1492                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1493         }  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1494         if (!wait_loop)
1495                 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1496 }
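
/* For illustration, the TXDCTL value programmed above works out to
 *
 *	IXGBE_TXDCTL_ENABLE | (8 << 16) | (1 << 8) | 32
 *
 * i.e. WTHRESH = 8, HTHRESH = 1 and PTHRESH = 32, so WTHRESH + PTHRESH is
 * exactly the 40 on-chip descriptors mentioned in the comment above.
 */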
1497
1498 /**
1499  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1500  * @adapter: board private structure
1501  *
1502  * Configure the Tx unit of the MAC after a reset.
1503  **/
1504 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1505 {
1506         u32 i;
1507
1508         /* Setup the HW Tx Head and Tail descriptor pointers */
1509         for (i = 0; i < adapter->num_tx_queues; i++)
1510                 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1511 }
1512
1513 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1514
1515 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1516 {
1517         struct ixgbe_hw *hw = &adapter->hw;
1518         u32 srrctl;
1519
1520         srrctl = IXGBE_SRRCTL_DROP_EN;
1521
1522         srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1523         srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1524         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1525
1526         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1527 }
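
/* A rough sketch of the scaling above, assuming the usual 82599-family
 * SRRCTL layout: the packet buffer size field is expressed in 1 KB units
 * (hence the >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) and the header buffer size in
 * 64 byte units (hence the extra left shift).  For example, assuming a
 * 2048 byte IXGBEVF_RX_BUFSZ and a shift of 10, the packet size field is
 *
 *	2048 >> 10 = 2	(2 KB buffer)
 *
 * DROP_EN lets the queue drop packets when it has no free descriptors
 * instead of backing up the shared packet buffer.
 */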
1528
1529 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1530 {
1531         struct ixgbe_hw *hw = &adapter->hw;
1532
1533         /* PSRTYPE must be initialized in 82599 */
1534         u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1535                       IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1536                       IXGBE_PSRTYPE_L2HDR;
1537
1538         if (adapter->num_rx_queues > 1)
1539                 psrtype |= 1 << 29;
1540
1541         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1542 }
1543
1544 #define IXGBEVF_MAX_RX_DESC_POLL 10
1545 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1546                                      struct ixgbevf_ring *ring)
1547 {
1548         struct ixgbe_hw *hw = &adapter->hw;
1549         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1550         u32 rxdctl;
1551         u8 reg_idx = ring->reg_idx;
1552
1553         if (IXGBE_REMOVED(hw->hw_addr))
1554                 return;
1555         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1556         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1557
1558         /* write value back with RXDCTL.ENABLE bit cleared */
1559         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1560
1561         /* the hardware may take up to 100us to really disable the rx queue */
1562         do {
1563                 udelay(10);
1564                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1565         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1566
1567         if (!wait_loop)
1568                 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1569                        reg_idx);
1570 }
1571
1572 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1573                                          struct ixgbevf_ring *ring)
1574 {
1575         struct ixgbe_hw *hw = &adapter->hw;
1576         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1577         u32 rxdctl;
1578         u8 reg_idx = ring->reg_idx;
1579
1580         if (IXGBE_REMOVED(hw->hw_addr))
1581                 return;
1582         do {
1583                 usleep_range(1000, 2000);
1584                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1585         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1586
1587         if (!wait_loop)
1588                 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1589                        reg_idx);
1590 }
1591
1592 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1593                                       struct ixgbevf_ring *ring)
1594 {
1595         struct ixgbe_hw *hw = &adapter->hw;
1596         u64 rdba = ring->dma;
1597         u32 rxdctl;
1598         u8 reg_idx = ring->reg_idx;
1599
1600         /* disable queue to avoid issues while updating state */
1601         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1602         ixgbevf_disable_rx_queue(adapter, ring);
1603
1604         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1605         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1606         IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1607                         ring->count * sizeof(union ixgbe_adv_rx_desc));
1608
1609         /* enable relaxed ordering */
1610         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1611                         IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1612
1613         /* reset head and tail pointers */
1614         IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1615         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1616         ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1617
1618         /* reset ntu and ntc to place SW in sync with hardware */
1619         ring->next_to_clean = 0;
1620         ring->next_to_use = 0;
1621         ring->next_to_alloc = 0;
1622
1623         ixgbevf_configure_srrctl(adapter, reg_idx);
1624
1625         /* allow any size packet since we can handle overflow */
1626         rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1627
1628         rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1629         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1630
1631         ixgbevf_rx_desc_queue_enable(adapter, ring);
1632         ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1633 }
1634
1635 /**
1636  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1637  * @adapter: board private structure
1638  *
1639  * Configure the Rx unit of the MAC after a reset.
1640  **/
1641 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1642 {
1643         int i;
1644         struct ixgbe_hw *hw = &adapter->hw;
1645         struct net_device *netdev = adapter->netdev;
1646
1647         ixgbevf_setup_psrtype(adapter);
1648
1649         /* notify the PF of our intent to use this size of frame */
1650         ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1651
1652         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1653          * the Base and Length of the Rx Descriptor Ring */
1654         for (i = 0; i < adapter->num_rx_queues; i++)
1655                 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1656 }
1657
1658 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1659                                    __be16 proto, u16 vid)
1660 {
1661         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1662         struct ixgbe_hw *hw = &adapter->hw;
1663         int err;
1664
1665         spin_lock_bh(&adapter->mbx_lock);
1666
1667         /* add VID to filter table */
1668         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1669
1670         spin_unlock_bh(&adapter->mbx_lock);
1671
1672         /* translate error return types so error makes sense */
1673         if (err == IXGBE_ERR_MBX)
1674                 return -EIO;
1675
1676         if (err == IXGBE_ERR_INVALID_ARGUMENT)
1677                 return -EACCES;
1678
1679         set_bit(vid, adapter->active_vlans);
1680
1681         return err;
1682 }
1683
1684 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1685                                     __be16 proto, u16 vid)
1686 {
1687         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1688         struct ixgbe_hw *hw = &adapter->hw;
1689         int err = -EOPNOTSUPP;
1690
1691         spin_lock_bh(&adapter->mbx_lock);
1692
1693         /* remove VID from filter table */
1694         err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1695
1696         spin_unlock_bh(&adapter->mbx_lock);
1697
1698         clear_bit(vid, adapter->active_vlans);
1699
1700         return err;
1701 }
1702
1703 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1704 {
1705         u16 vid;
1706
1707         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1708                 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1709                                         htons(ETH_P_8021Q), vid);
1710 }
1711
1712 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1713 {
1714         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1715         struct ixgbe_hw *hw = &adapter->hw;
1716         int count = 0;
1717
1718         if (netdev_uc_count(netdev) > 10) {
1719                 pr_err("Too many unicast filters - No Space\n");
1720                 return -ENOSPC;
1721         }
1722
1723         if (!netdev_uc_empty(netdev)) {
1724                 struct netdev_hw_addr *ha;
1725                 netdev_for_each_uc_addr(ha, netdev) {
1726                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1727                         udelay(200);
1728                 }
1729         } else {
1730                 /*
1731                  * If the list is empty then send message to PF driver to
1732                  * clear all macvlans on this VF.
1733                  */
1734                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1735         }
1736
1737         return count;
1738 }
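
/* The loop above hands addresses to the PF starting at index 1 (++count);
 * index 0 with a NULL address is the "clear all macvlans" request used for
 * the empty-list case:
 *
 *	hw->mac.ops.set_uc_addr(hw, 0, NULL);
 */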
1739
1740 /**
1741  * ixgbevf_set_rx_mode - Multicast and unicast set
1742  * @netdev: network interface device structure
1743  *
1744  * The set_rx_mode entry point is called whenever the multicast address
1745  * list, unicast address list or the network interface flags are updated.
1746  * This routine is responsible for configuring the hardware for proper
1747  * multicast mode and configuring requested unicast filters.
1748  **/
1749 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1750 {
1751         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1752         struct ixgbe_hw *hw = &adapter->hw;
1753
1754         spin_lock_bh(&adapter->mbx_lock);
1755
1756         /* reprogram multicast list */
1757         hw->mac.ops.update_mc_addr_list(hw, netdev);
1758
1759         ixgbevf_write_uc_addr_list(netdev);
1760
1761         spin_unlock_bh(&adapter->mbx_lock);
1762 }
1763
1764 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1765 {
1766         int q_idx;
1767         struct ixgbevf_q_vector *q_vector;
1768         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1769
1770         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1771                 q_vector = adapter->q_vector[q_idx];
1772 #ifdef CONFIG_NET_RX_BUSY_POLL
1773                 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1774 #endif
1775                 napi_enable(&q_vector->napi);
1776         }
1777 }
1778
1779 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1780 {
1781         int q_idx;
1782         struct ixgbevf_q_vector *q_vector;
1783         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1784
1785         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1786                 q_vector = adapter->q_vector[q_idx];
1787                 napi_disable(&q_vector->napi);
1788 #ifdef CONFIG_NET_RX_BUSY_POLL
1789                 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1790                         pr_info("QV %d locked\n", q_idx);
1791                         usleep_range(1000, 20000);
1792                 }
1793 #endif /* CONFIG_NET_RX_BUSY_POLL */
1794         }
1795 }
1796
1797 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1798 {
1799         struct ixgbe_hw *hw = &adapter->hw;
1800         unsigned int def_q = 0;
1801         unsigned int num_tcs = 0;
1802         unsigned int num_rx_queues = 1;
1803         int err;
1804
1805         spin_lock_bh(&adapter->mbx_lock);
1806
1807         /* fetch queue configuration from the PF */
1808         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1809
1810         spin_unlock_bh(&adapter->mbx_lock);
1811
1812         if (err)
1813                 return err;
1814
1815         if (num_tcs > 1) {
1816                 /* update default Tx ring register index */
1817                 adapter->tx_ring[0]->reg_idx = def_q;
1818
1819                 /* we need as many queues as traffic classes */
1820                 num_rx_queues = num_tcs;
1821         }
1822
1823         /* if we have a bad config abort request queue reset */
1824         if (adapter->num_rx_queues != num_rx_queues) {
1825                 /* force mailbox timeout to prevent further messages */
1826                 hw->mbx.timeout = 0;
1827
1828                 /* wait for watchdog to come around and bail us out */
1829                 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1830         }
1831
1832         return 0;
1833 }
1834
1835 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1836 {
1837         ixgbevf_configure_dcb(adapter);
1838
1839         ixgbevf_set_rx_mode(adapter->netdev);
1840
1841         ixgbevf_restore_vlan(adapter);
1842
1843         ixgbevf_configure_tx(adapter);
1844         ixgbevf_configure_rx(adapter);
1845 }
1846
1847 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1848 {
1849         /* Only save pre-reset stats if there are some */
1850         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1851                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1852                         adapter->stats.base_vfgprc;
1853                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1854                         adapter->stats.base_vfgptc;
1855                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1856                         adapter->stats.base_vfgorc;
1857                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1858                         adapter->stats.base_vfgotc;
1859                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1860                         adapter->stats.base_vfmprc;
1861         }
1862 }
1863
1864 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1865 {
1866         struct ixgbe_hw *hw = &adapter->hw;
1867
1868         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1869         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1870         adapter->stats.last_vfgorc |=
1871                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1872         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1873         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1874         adapter->stats.last_vfgotc |=
1875                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1876         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1877
1878         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1879         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1880         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1881         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1882         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1883 }
1884
1885 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1886 {
1887         struct ixgbe_hw *hw = &adapter->hw;
1888         int api[] = { ixgbe_mbox_api_11,
1889                       ixgbe_mbox_api_10,
1890                       ixgbe_mbox_api_unknown };
1891         int err = 0, idx = 0;
1892
1893         spin_lock_bh(&adapter->mbx_lock);
1894
1895         while (api[idx] != ixgbe_mbox_api_unknown) {
1896                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1897                 if (!err)
1898                         break;
1899                 idx++;
1900         }
1901
1902         spin_unlock_bh(&adapter->mbx_lock);
1903 }
1904
1905 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1906 {
1907         struct net_device *netdev = adapter->netdev;
1908         struct ixgbe_hw *hw = &adapter->hw;
1909
1910         ixgbevf_configure_msix(adapter);
1911
1912         spin_lock_bh(&adapter->mbx_lock);
1913
1914         if (is_valid_ether_addr(hw->mac.addr))
1915                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1916         else
1917                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1918
1919         spin_unlock_bh(&adapter->mbx_lock);
1920
1921         smp_mb__before_atomic();
1922         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1923         ixgbevf_napi_enable_all(adapter);
1924
1925         /* enable transmits */
1926         netif_tx_start_all_queues(netdev);
1927
1928         ixgbevf_save_reset_stats(adapter);
1929         ixgbevf_init_last_counter_stats(adapter);
1930
1931         hw->mac.get_link_status = 1;
1932         mod_timer(&adapter->watchdog_timer, jiffies);
1933 }
1934
1935 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1936 {
1937         struct ixgbe_hw *hw = &adapter->hw;
1938
1939         ixgbevf_configure(adapter);
1940
1941         ixgbevf_up_complete(adapter);
1942
1943         /* clear any pending interrupts, may auto mask */
1944         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1945
1946         ixgbevf_irq_enable(adapter);
1947 }
1948
1949 /**
1950  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1951  * @rx_ring: ring to free buffers from
1952  **/
1953 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
1954 {
1955         struct device *dev = rx_ring->dev;
1956         unsigned long size;
1957         unsigned int i;
1958
1959         /* Free Rx ring sk_buff */
1960         if (rx_ring->skb) {
1961                 dev_kfree_skb(rx_ring->skb);
1962                 rx_ring->skb = NULL;
1963         }
1964
1965         /* ring already cleared, nothing to do */
1966         if (!rx_ring->rx_buffer_info)
1967                 return;
1968
1969         /* Free all the Rx ring pages */
1970         for (i = 0; i < rx_ring->count; i++) {
1971                 struct ixgbevf_rx_buffer *rx_buffer;
1972
1973                 rx_buffer = &rx_ring->rx_buffer_info[i];
1974                 if (rx_buffer->dma)
1975                         dma_unmap_page(dev, rx_buffer->dma,
1976                                        PAGE_SIZE, DMA_FROM_DEVICE);
1977                 rx_buffer->dma = 0;
1978                 if (rx_buffer->page)
1979                         __free_page(rx_buffer->page);
1980                 rx_buffer->page = NULL;
1981         }
1982
1983         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1984         memset(rx_ring->rx_buffer_info, 0, size);
1985
1986         /* Zero out the descriptor ring */
1987         memset(rx_ring->desc, 0, rx_ring->size);
1988 }
1989
1990 /**
1991  * ixgbevf_clean_tx_ring - Free Tx Buffers
1992  * @tx_ring: ring to be cleaned
1993  **/
1994 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1995 {
1996         struct ixgbevf_tx_buffer *tx_buffer_info;
1997         unsigned long size;
1998         unsigned int i;
1999
2000         if (!tx_ring->tx_buffer_info)
2001                 return;
2002
2003         /* Free all the Tx ring sk_buffs */
2004         for (i = 0; i < tx_ring->count; i++) {
2005                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2006                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2007         }
2008
2009         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2010         memset(tx_ring->tx_buffer_info, 0, size);
2011
2012         memset(tx_ring->desc, 0, tx_ring->size);
2013 }
2014
2015 /**
2016  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2017  * @adapter: board private structure
2018  **/
2019 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2020 {
2021         int i;
2022
2023         for (i = 0; i < adapter->num_rx_queues; i++)
2024                 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2025 }
2026
2027 /**
2028  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2029  * @adapter: board private structure
2030  **/
2031 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2032 {
2033         int i;
2034
2035         for (i = 0; i < adapter->num_tx_queues; i++)
2036                 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2037 }
2038
2039 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2040 {
2041         struct net_device *netdev = adapter->netdev;
2042         struct ixgbe_hw *hw = &adapter->hw;
2043         int i;
2044
2045         /* signal that we are down to the interrupt handler */
2046         if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2047                 return; /* do nothing if already down */
2048
2049         /* disable all enabled rx queues */
2050         for (i = 0; i < adapter->num_rx_queues; i++)
2051                 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2052
2053         netif_tx_disable(netdev);
2054
2055         msleep(10);
2056
2057         netif_tx_stop_all_queues(netdev);
2058
2059         ixgbevf_irq_disable(adapter);
2060
2061         ixgbevf_napi_disable_all(adapter);
2062
2063         del_timer_sync(&adapter->watchdog_timer);
2064         /* can't call flush scheduled work here because it can deadlock
2065          * if linkwatch_event tries to acquire the rtnl_lock which we are
2066          * holding */
2067         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2068                 msleep(1);
2069
2070         /* disable transmits in the hardware now that interrupts are off */
2071         for (i = 0; i < adapter->num_tx_queues; i++) {
2072                 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2073
2074                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2075                                 IXGBE_TXDCTL_SWFLSH);
2076         }
2077
2078         netif_carrier_off(netdev);
2079
2080         if (!pci_channel_offline(adapter->pdev))
2081                 ixgbevf_reset(adapter);
2082
2083         ixgbevf_clean_all_tx_rings(adapter);
2084         ixgbevf_clean_all_rx_rings(adapter);
2085 }
2086
2087 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2088 {
2089         WARN_ON(in_interrupt());
2090
2091         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2092                 msleep(1);
2093
2094         ixgbevf_down(adapter);
2095         ixgbevf_up(adapter);
2096
2097         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2098 }
2099
2100 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2101 {
2102         struct ixgbe_hw *hw = &adapter->hw;
2103         struct net_device *netdev = adapter->netdev;
2104
2105         if (hw->mac.ops.reset_hw(hw)) {
2106                 hw_dbg(hw, "PF still resetting\n");
2107         } else {
2108                 hw->mac.ops.init_hw(hw);
2109                 ixgbevf_negotiate_api(adapter);
2110         }
2111
2112         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2113                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2114                        netdev->addr_len);
2115                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2116                        netdev->addr_len);
2117         }
2118 }
2119
2120 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2121                                         int vectors)
2122 {
2123         int vector_threshold;
2124
2125         /* We'll want at least 2 (vector_threshold):
2126          * 1) TxQ[0] + RxQ[0] handler
2127          * 2) Other (Link Status Change, etc.)
2128          */
2129         vector_threshold = MIN_MSIX_COUNT;
2130
2131         /* The more we get, the more we will assign to Tx/Rx Cleanup
2132          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2133          * Right now, we simply care about how many we'll get; we'll
2134          * set them up later while requesting irq's.
2135          */
2136         vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2137                                         vector_threshold, vectors);
2138
2139         if (vectors < 0) {
2140                 dev_err(&adapter->pdev->dev,
2141                         "Unable to allocate MSI-X interrupts\n");
2142                 kfree(adapter->msix_entries);
2143                 adapter->msix_entries = NULL;
2144                 return vectors;
2145         }
2146
2147         /* Adjust for only the vectors we'll use, which is minimum
2148          * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2149          * vectors we were allocated.
2150          */
2151         adapter->num_msix_vectors = vectors;
2152
2153         return 0;
2154 }
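
/* pci_enable_msix_range() returns the number of vectors actually granted
 * when it can allocate anywhere in [vector_threshold, vectors], or a
 * negative errno when even the minimum cannot be met.  Illustrative
 * outcomes for the VF:
 *
 *	requested 3, granted 3	-> num_msix_vectors = 3
 *	requested 3, granted 2	-> num_msix_vectors = 2 (minimum still met)
 *	fewer than 2 available	-> negative return, msix_entries freed and
 *				   the error propagated to the caller
 */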
2155
2156 /**
2157  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2158  * @adapter: board private structure to initialize
2159  *
2160  * This is the top level queue allocation routine.  The order here is very
2161  * important, starting with the "most" number of features turned on at once,
2162  * and ending with the smallest set of features.  This way large combinations
2163  * can be allocated if they're turned on, and smaller combinations are the
2164  * fallthrough conditions.
2165  *
2166  **/
2167 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2168 {
2169         struct ixgbe_hw *hw = &adapter->hw;
2170         unsigned int def_q = 0;
2171         unsigned int num_tcs = 0;
2172         int err;
2173
2174         /* Start with base case */
2175         adapter->num_rx_queues = 1;
2176         adapter->num_tx_queues = 1;
2177
2178         spin_lock_bh(&adapter->mbx_lock);
2179
2180         /* fetch queue configuration from the PF */
2181         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2182
2183         spin_unlock_bh(&adapter->mbx_lock);
2184
2185         if (err)
2186                 return;
2187
2188         /* we need as many queues as traffic classes */
2189         if (num_tcs > 1)
2190                 adapter->num_rx_queues = num_tcs;
2191 }
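
/* For example (hypothetical PF reply): if the PF reports num_tcs = 4 for a
 * DCB configuration, the VF ends up with 4 Rx queues while still using a
 * single Tx queue; with num_tcs of 0 or 1 it keeps the 1 Rx / 1 Tx base
 * case set above.
 */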
2192
2193 /**
2194  * ixgbevf_alloc_queues - Allocate memory for all rings
2195  * @adapter: board private structure to initialize
2196  *
2197  * We allocate one ring per queue at run-time since we don't know the
2198  * number of queues at compile-time.  The polling_netdev array is
2199  * intended for Multiqueue, but should work fine with a single queue.
2200  **/
2201 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2202 {
2203         struct ixgbevf_ring *ring;
2204         int rx = 0, tx = 0;
2205
2206         for (; tx < adapter->num_tx_queues; tx++) {
2207                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2208                 if (!ring)
2209                         goto err_allocation;
2210
2211                 ring->dev = &adapter->pdev->dev;
2212                 ring->netdev = adapter->netdev;
2213                 ring->count = adapter->tx_ring_count;
2214                 ring->queue_index = tx;
2215                 ring->reg_idx = tx;
2216
2217                 adapter->tx_ring[tx] = ring;
2218         }
2219
2220         for (; rx < adapter->num_rx_queues; rx++) {
2221                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2222                 if (!ring)
2223                         goto err_allocation;
2224
2225                 ring->dev = &adapter->pdev->dev;
2226                 ring->netdev = adapter->netdev;
2227
2228                 ring->count = adapter->rx_ring_count;
2229                 ring->queue_index = rx;
2230                 ring->reg_idx = rx;
2231
2232                 adapter->rx_ring[rx] = ring;
2233         }
2234
2235         return 0;
2236
2237 err_allocation:
2238         while (tx) {
2239                 kfree(adapter->tx_ring[--tx]);
2240                 adapter->tx_ring[tx] = NULL;
2241         }
2242
2243         while (rx) {
2244                 kfree(adapter->rx_ring[--rx]);
2245                 adapter->rx_ring[rx] = NULL;
2246         }
2247         return -ENOMEM;
2248 }
2249
2250 /**
2251  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2252  * @adapter: board private structure to initialize
2253  *
2254  * Attempt to configure the interrupts using the best available
2255  * capabilities of the hardware and the kernel.
2256  **/
2257 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2258 {
2259         struct net_device *netdev = adapter->netdev;
2260         int err = 0;
2261         int vector, v_budget;
2262
2263         /*
2264          * It's easy to be greedy for MSI-X vectors, but it really
2265          * doesn't do us much good if we have a lot more vectors
2266          * than CPUs.  So let's be conservative and only ask for
2267          * (roughly) the same number of vectors as there are CPUs.
2268          * The default is to use pairs of vectors.
2269          */
2270         v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2271         v_budget = min_t(int, v_budget, num_online_cpus());
2272         v_budget += NON_Q_VECTORS;
2273
2274         /* A failure in MSI-X entry allocation is fatal for the VF driver,
2275          * which has no non-MSI-X fallback interrupt mode. */
2276         adapter->msix_entries = kcalloc(v_budget,
2277                                         sizeof(struct msix_entry), GFP_KERNEL);
2278         if (!adapter->msix_entries) {
2279                 err = -ENOMEM;
2280                 goto out;
2281         }
2282
2283         for (vector = 0; vector < v_budget; vector++)
2284                 adapter->msix_entries[vector].entry = vector;
2285
2286         err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2287         if (err)
2288                 goto out;
2289
2290         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2291         if (err)
2292                 goto out;
2293
2294         err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2295
2296 out:
2297         return err;
2298 }
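
/* A worked example of the vector budget above, assuming NON_Q_VECTORS is
 * the single mailbox/other vector: with 2 Tx and 2 Rx queues on an 8-CPU
 * host,
 *
 *	v_budget = min(max(2, 2), 8) + 1 = 3
 *
 * so three msix_entry slots are allocated and passed to
 * ixgbevf_acquire_msix_vectors().
 */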
2299
2300 /**
2301  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2302  * @adapter: board private structure to initialize
2303  *
2304  * We allocate one q_vector per queue interrupt.  If allocation fails we
2305  * return -ENOMEM.
2306  **/
2307 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2308 {
2309         int q_idx, num_q_vectors;
2310         struct ixgbevf_q_vector *q_vector;
2311
2312         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2313
2314         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2315                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2316                 if (!q_vector)
2317                         goto err_out;
2318                 q_vector->adapter = adapter;
2319                 q_vector->v_idx = q_idx;
2320                 netif_napi_add(adapter->netdev, &q_vector->napi,
2321                                ixgbevf_poll, 64);
2322 #ifdef CONFIG_NET_RX_BUSY_POLL
2323                 napi_hash_add(&q_vector->napi);
2324 #endif
2325                 adapter->q_vector[q_idx] = q_vector;
2326         }
2327
2328         return 0;
2329
2330 err_out:
2331         while (q_idx) {
2332                 q_idx--;
2333                 q_vector = adapter->q_vector[q_idx];
2334 #ifdef CONFIG_NET_RX_BUSY_POLL
2335                 napi_hash_del(&q_vector->napi);
2336 #endif
2337                 netif_napi_del(&q_vector->napi);
2338                 kfree(q_vector);
2339                 adapter->q_vector[q_idx] = NULL;
2340         }
2341         return -ENOMEM;
2342 }
2343
2344 /**
2345  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2346  * @adapter: board private structure to initialize
2347  *
2348  * This function frees the memory allocated to the q_vectors.  In addition if
2349  * NAPI is enabled it will delete any references to the NAPI struct prior
2350  * to freeing the q_vector.
2351  **/
2352 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2353 {
2354         int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2355
2356         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2357                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2358
2359                 adapter->q_vector[q_idx] = NULL;
2360 #ifdef CONFIG_NET_RX_BUSY_POLL
2361                 napi_hash_del(&q_vector->napi);
2362 #endif
2363                 netif_napi_del(&q_vector->napi);
2364                 kfree(q_vector);
2365         }
2366 }
2367
2368 /**
2369  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2370  * @adapter: board private structure
2371  *
2372  **/
2373 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2374 {
2375         pci_disable_msix(adapter->pdev);
2376         kfree(adapter->msix_entries);
2377         adapter->msix_entries = NULL;
2378 }
2379
2380 /**
2381  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2382  * @adapter: board private structure to initialize
2383  *
2384  **/
2385 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2386 {
2387         int err;
2388
2389         /* Number of supported queues */
2390         ixgbevf_set_num_queues(adapter);
2391
2392         err = ixgbevf_set_interrupt_capability(adapter);
2393         if (err) {
2394                 hw_dbg(&adapter->hw,
2395                        "Unable to setup interrupt capabilities\n");
2396                 goto err_set_interrupt;
2397         }
2398
2399         err = ixgbevf_alloc_q_vectors(adapter);
2400         if (err) {
2401                 hw_dbg(&adapter->hw,
2402                        "Unable to allocate memory for queue vectors\n");
2403                 goto err_alloc_q_vectors;
2404         }
2405
2406         err = ixgbevf_alloc_queues(adapter);
2407         if (err) {
2408                 pr_err("Unable to allocate memory for queues\n");
2409                 goto err_alloc_queues;
2410         }
2411
2412         hw_dbg(&adapter->hw,
2413                "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2414                (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2415                adapter->num_rx_queues, adapter->num_tx_queues);
2416
2417         set_bit(__IXGBEVF_DOWN, &adapter->state);
2418
2419         return 0;
2420 err_alloc_queues:
2421         ixgbevf_free_q_vectors(adapter);
2422 err_alloc_q_vectors:
2423         ixgbevf_reset_interrupt_capability(adapter);
2424 err_set_interrupt:
2425         return err;
2426 }
2427
2428 /**
2429  * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2430  * @adapter: board private structure to clear interrupt scheme on
2431  *
2432  * We go through and clear interrupt specific resources and reset the structure
2433  * to pre-load conditions
2434  **/
2435 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2436 {
2437         int i;
2438
2439         for (i = 0; i < adapter->num_tx_queues; i++) {
2440                 kfree(adapter->tx_ring[i]);
2441                 adapter->tx_ring[i] = NULL;
2442         }
2443         for (i = 0; i < adapter->num_rx_queues; i++) {
2444                 kfree(adapter->rx_ring[i]);
2445                 adapter->rx_ring[i] = NULL;
2446         }
2447
2448         adapter->num_tx_queues = 0;
2449         adapter->num_rx_queues = 0;
2450
2451         ixgbevf_free_q_vectors(adapter);
2452         ixgbevf_reset_interrupt_capability(adapter);
2453 }
2454
2455 /**
2456  * ixgbevf_sw_init - Initialize general software structures (struct ixgbevf_adapter)
2458  * @adapter: board private structure to initialize
2459  *
2460  * ixgbevf_sw_init initializes the Adapter private data structure.
2461  * Fields are initialized based on PCI device information and
2462  * OS network device settings (MTU size).
2463  **/
2464 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2465 {
2466         struct ixgbe_hw *hw = &adapter->hw;
2467         struct pci_dev *pdev = adapter->pdev;
2468         struct net_device *netdev = adapter->netdev;
2469         int err;
2470
2471         /* PCI config space info */
2472
2473         hw->vendor_id = pdev->vendor;
2474         hw->device_id = pdev->device;
2475         hw->revision_id = pdev->revision;
2476         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2477         hw->subsystem_device_id = pdev->subsystem_device;
2478
2479         hw->mbx.ops.init_params(hw);
2480
2481         /* assume legacy case in which PF would only give VF 2 queues */
2482         hw->mac.max_tx_queues = 2;
2483         hw->mac.max_rx_queues = 2;
2484
2485         /* lock to protect mailbox accesses */
2486         spin_lock_init(&adapter->mbx_lock);
2487
2488         err = hw->mac.ops.reset_hw(hw);
2489         if (err) {
2490                 dev_info(&pdev->dev,
2491                          "PF still in reset state.  Is the PF interface up?\n");
2492         } else {
2493                 err = hw->mac.ops.init_hw(hw);
2494                 if (err) {
2495                         pr_err("init_shared_code failed: %d\n", err);
2496                         goto out;
2497                 }
2498                 ixgbevf_negotiate_api(adapter);
2499                 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2500                 if (err)
2501                         dev_info(&pdev->dev, "Error reading MAC address\n");
2502                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2503                         dev_info(&pdev->dev,
2504                                  "MAC address not assigned by administrator.\n");
2505                 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2506         }
2507
2508         if (!is_valid_ether_addr(netdev->dev_addr)) {
2509                 dev_info(&pdev->dev, "Assigning random MAC address\n");
2510                 eth_hw_addr_random(netdev);
2511                 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2512         }
2513
2514         /* Enable dynamic interrupt throttling rates */
2515         adapter->rx_itr_setting = 1;
2516         adapter->tx_itr_setting = 1;
2517
2518         /* set default ring sizes */
2519         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2520         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2521
2522         set_bit(__IXGBEVF_DOWN, &adapter->state);
2523         return 0;
2524
2525 out:
2526         return err;
2527 }
2528
2529 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2530         {                                                       \
2531                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2532                 if (current_counter < last_counter)             \
2533                         counter += 0x100000000LL;               \
2534                 last_counter = current_counter;                 \
2535                 counter &= 0xFFFFFFFF00000000LL;                \
2536                 counter |= current_counter;                     \
2537         }
2538
2539 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2540         {                                                                \
2541                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2542                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2543                 u64 current_counter = (current_counter_msb << 32) |      \
2544                         current_counter_lsb;                             \
2545                 if (current_counter < last_counter)                      \
2546                         counter += 0x1000000000LL;                       \
2547                 last_counter = current_counter;                          \
2548                 counter &= 0xFFFFFFF000000000LL;                         \
2549                 counter |= current_counter;                              \
2550         }
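
/* The VF statistics registers are free-running counters that the VF cannot
 * clear, so the macros above extend them to 64 bits in software.  A worked
 * example of the 32-bit case with illustrative values:
 *
 *	last_counter = 0xFFFFFFF0, current_counter = 0x00000010
 *
 * current < last, so the accumulated counter gains 0x100000000 (one wrap)
 * and its low 32 bits are replaced with 0x00000010.
 */
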
2551 /**
2552  * ixgbevf_update_stats - Update the board statistics counters.
2553  * @adapter: board private structure
2554  **/
2555 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2556 {
2557         struct ixgbe_hw *hw = &adapter->hw;
2558         int i;
2559
2560         if (!adapter->link_up)
2561                 return;
2562
2563         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2564                                 adapter->stats.vfgprc);
2565         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2566                                 adapter->stats.vfgptc);
2567         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2568                                 adapter->stats.last_vfgorc,
2569                                 adapter->stats.vfgorc);
2570         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2571                                 adapter->stats.last_vfgotc,
2572                                 adapter->stats.vfgotc);
2573         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2574                                 adapter->stats.vfmprc);
2575
2576         for (i = 0;  i  < adapter->num_rx_queues;  i++) {
2577                 adapter->hw_csum_rx_error +=
2578                         adapter->rx_ring[i]->hw_csum_rx_error;
2579                 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2580         }
2581 }
2582
2583 /**
2584  * ixgbevf_watchdog - Timer Call-back
2585  * @data: pointer to adapter cast into an unsigned long
2586  **/
2587 static void ixgbevf_watchdog(unsigned long data)
2588 {
2589         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2590         struct ixgbe_hw *hw = &adapter->hw;
2591         u32 eics = 0;
2592         int i;
2593
2594         /*
2595          * Do the watchdog outside of interrupt context due to the lovely
2596          * delays that some of the newer hardware requires
2597          */
2598
2599         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2600                 goto watchdog_short_circuit;
2601
2602         /* get one bit for every active tx/rx interrupt vector */
2603         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2604                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2605                 if (qv->rx.ring || qv->tx.ring)
2606                         eics |= 1 << i;
2607         }
2608
2609         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2610
2611 watchdog_short_circuit:
2612         schedule_work(&adapter->watchdog_task);
2613 }
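
/* Writing the bitmap to VTEICS sets the corresponding interrupt cause bits,
 * so every vector that has at least one ring attached receives a software
 * triggered interrupt even if no hardware event is pending.  For example,
 * with two queue vectors that both carry rings, eics is 0x3.
 */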
2614
2615 /**
2616  * ixgbevf_tx_timeout - Respond to a Tx Hang
2617  * @netdev: network interface device structure
2618  **/
2619 static void ixgbevf_tx_timeout(struct net_device *netdev)
2620 {
2621         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2622
2623         /* Do the reset outside of interrupt context */
2624         schedule_work(&adapter->reset_task);
2625 }
2626
2627 static void ixgbevf_reset_task(struct work_struct *work)
2628 {
2629         struct ixgbevf_adapter *adapter;
2630         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2631
2632         /* If we're already down or resetting, just bail */
2633         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2634             test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2635             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2636                 return;
2637
2638         adapter->tx_timeout_count++;
2639
2640         ixgbevf_reinit_locked(adapter);
2641 }
2642
2643 /**
2644  * ixgbevf_watchdog_task - worker thread to bring link up
2645  * @work: pointer to work_struct containing our data
2646  **/
2647 static void ixgbevf_watchdog_task(struct work_struct *work)
2648 {
2649         struct ixgbevf_adapter *adapter = container_of(work,
2650                                                        struct ixgbevf_adapter,
2651                                                        watchdog_task);
2652         struct net_device *netdev = adapter->netdev;
2653         struct ixgbe_hw *hw = &adapter->hw;
2654         u32 link_speed = adapter->link_speed;
2655         bool link_up = adapter->link_up;
2656         s32 need_reset;
2657
2658         if (IXGBE_REMOVED(hw->hw_addr)) {
2659                 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2660                         rtnl_lock();
2661                         ixgbevf_down(adapter);
2662                         rtnl_unlock();
2663                 }
2664                 return;
2665         }
2666         ixgbevf_queue_reset_subtask(adapter);
2667
2668         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2669
2670         /*
2671          * Always check the link on the watchdog because we have
2672          * no LSC interrupt
2673          */
2674         spin_lock_bh(&adapter->mbx_lock);
2675
2676         need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2677
2678         spin_unlock_bh(&adapter->mbx_lock);
2679
2680         if (need_reset) {
2681                 adapter->link_up = link_up;
2682                 adapter->link_speed = link_speed;
2683                 netif_carrier_off(netdev);
2684                 netif_tx_stop_all_queues(netdev);
2685                 schedule_work(&adapter->reset_task);
2686                 goto pf_has_reset;
2687         }
2688         adapter->link_up = link_up;
2689         adapter->link_speed = link_speed;
2690
2691         if (link_up) {
2692                 if (!netif_carrier_ok(netdev)) {
2693                         char *link_speed_string;
2694                         switch (link_speed) {
2695                         case IXGBE_LINK_SPEED_10GB_FULL:
2696                                 link_speed_string = "10 Gbps";
2697                                 break;
2698                         case IXGBE_LINK_SPEED_1GB_FULL:
2699                                 link_speed_string = "1 Gbps";
2700                                 break;
2701                         case IXGBE_LINK_SPEED_100_FULL:
2702                                 link_speed_string = "100 Mbps";
2703                                 break;
2704                         default:
2705                                 link_speed_string = "unknown speed";
2706                                 break;
2707                         }
2708                         dev_info(&adapter->pdev->dev,
2709                                 "NIC Link is Up, %s\n", link_speed_string);
2710                         netif_carrier_on(netdev);
2711                         netif_tx_wake_all_queues(netdev);
2712                 }
2713         } else {
2714                 adapter->link_up = false;
2715                 adapter->link_speed = 0;
2716                 if (netif_carrier_ok(netdev)) {
2717                         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2718                         netif_carrier_off(netdev);
2719                         netif_tx_stop_all_queues(netdev);
2720                 }
2721         }
2722
2723         ixgbevf_update_stats(adapter);
2724
2725 pf_has_reset:
2726         /* Reset the timer */
2727         if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2728             !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2729                 mod_timer(&adapter->watchdog_timer,
2730                           round_jiffies(jiffies + (2 * HZ)));
2731
2732         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2733 }
2734
2735 /**
2736  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2737  * @tx_ring: Tx descriptor ring for a specific queue
2738  *
2739  * Free all transmit software resources
2740  **/
2741 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2742 {
2743         ixgbevf_clean_tx_ring(tx_ring);
2744
2745         vfree(tx_ring->tx_buffer_info);
2746         tx_ring->tx_buffer_info = NULL;
2747
2748         /* if not set, then don't free */
2749         if (!tx_ring->desc)
2750                 return;
2751
2752         dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2753                           tx_ring->dma);
2754
2755         tx_ring->desc = NULL;
2756 }
2757
2758 /**
2759  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2760  * @adapter: board private structure
2761  *
2762  * Free all transmit software resources
2763  **/
2764 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2765 {
2766         int i;
2767
2768         for (i = 0; i < adapter->num_tx_queues; i++)
2769                 if (adapter->tx_ring[i]->desc)
2770                         ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2771 }
2772
2773 /**
2774  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2775  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2776  *
2777  * Return 0 on success, negative on failure
2778  **/
2779 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2780 {
2781         int size;
2782
2783         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2784         tx_ring->tx_buffer_info = vzalloc(size);
2785         if (!tx_ring->tx_buffer_info)
2786                 goto err;
2787
2788         /* round up to nearest 4K */
2789         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2790         tx_ring->size = ALIGN(tx_ring->size, 4096);
2791
2792         tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2793                                            &tx_ring->dma, GFP_KERNEL);
2794         if (!tx_ring->desc)
2795                 goto err;
2796
2797         return 0;
2798
2799 err:
2800         vfree(tx_ring->tx_buffer_info);
2801         tx_ring->tx_buffer_info = NULL;
2802         dev_err(tx_ring->dev,
2803                 "Unable to allocate memory for the Tx descriptor ring\n");
2804         return -ENOMEM;
2805 }
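
/* Sizing sketch for the allocation above: each advanced Tx descriptor is
 * 16 bytes, so with an assumed default of 1024 descriptors
 *
 *	tx_ring->size = 1024 * 16 = 16384 bytes
 *
 * which is already a multiple of 4 KB; the ALIGN() only matters for ring
 * counts whose byte size is not 4 KB aligned.
 */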
2806
2807 /**
2808  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2809  * @adapter: board private structure
2810  *
2811  * If this function returns with an error, then it's possible one or
2812  * more of the rings is populated (while the rest are not).  It is the
2813  * callers duty to clean those orphaned rings.
2814  *
2815  * Return 0 on success, negative on failure
2816  **/
2817 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2818 {
2819         int i, err = 0;
2820
2821         for (i = 0; i < adapter->num_tx_queues; i++) {
2822                 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2823                 if (!err)
2824                         continue;
2825                 hw_dbg(&adapter->hw,
2826                        "Allocation for Tx Queue %u failed\n", i);
2827                 break;
2828         }
2829
2830         return err;
2831 }
2832
2833 /**
2834  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2835  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2836  *
2837  * Returns 0 on success, negative on failure
2838  **/
2839 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2840 {
2841         int size;
2842
2843         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2844         rx_ring->rx_buffer_info = vzalloc(size);
2845         if (!rx_ring->rx_buffer_info)
2846                 goto err;
2847
2848         /* Round up to nearest 4K */
2849         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2850         rx_ring->size = ALIGN(rx_ring->size, 4096);
2851
2852         rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2853                                            &rx_ring->dma, GFP_KERNEL);
2854
2855         if (!rx_ring->desc)
2856                 goto err;
2857
2858         return 0;
2859 err:
2860         vfree(rx_ring->rx_buffer_info);
2861         rx_ring->rx_buffer_info = NULL;
2862         dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2863         return -ENOMEM;
2864 }
2865
2866 /**
2867  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2868  * @adapter: board private structure
2869  *
2870  * If this function returns with an error, then it's possible one or
2871  * more of the rings is populated (while the rest are not).  It is the
2872  * callers duty to clean those orphaned rings.
2873  *
2874  * Return 0 on success, negative on failure
2875  **/
2876 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2877 {
2878         int i, err = 0;
2879
2880         for (i = 0; i < adapter->num_rx_queues; i++) {
2881                 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2882                 if (!err)
2883                         continue;
2884                 hw_dbg(&adapter->hw,
2885                        "Allocation for Rx Queue %u failed\n", i);
2886                 break;
2887         }
2888         return err;
2889 }
2890
2891 /**
2892  * ixgbevf_free_rx_resources - Free Rx Resources
2893  * @rx_ring: ring to clean the resources from
2894  *
2895  * Free all receive software resources
2896  **/
2897 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2898 {
2899         ixgbevf_clean_rx_ring(rx_ring);
2900
2901         vfree(rx_ring->rx_buffer_info);
2902         rx_ring->rx_buffer_info = NULL;
2903
2904         dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2905                           rx_ring->dma);
2906
2907         rx_ring->desc = NULL;
2908 }
2909
2910 /**
2911  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2912  * @adapter: board private structure
2913  *
2914  * Free all receive software resources
2915  **/
2916 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2917 {
2918         int i;
2919
2920         for (i = 0; i < adapter->num_rx_queues; i++)
2921                 if (adapter->rx_ring[i]->desc)
2922                         ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2923 }
2924
2925 /**
2926  * ixgbevf_open - Called when a network interface is made active
2927  * @netdev: network interface device structure
2928  *
2929  * Returns 0 on success, negative value on failure
2930  *
2931  * The open entry point is called when a network interface is made
2932  * active by the system (IFF_UP).  At this point all resources needed
2933  * for transmit and receive operations are allocated, the interrupt
2934  * handler is registered with the OS, the watchdog timer is started,
2935  * and the stack is notified that the interface is ready.
2936  **/
2937 static int ixgbevf_open(struct net_device *netdev)
2938 {
2939         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2940         struct ixgbe_hw *hw = &adapter->hw;
2941         int err;
2942
2943         /* A previous failure to open the device because of a lack of
2944          * available MSIX vector resources may have reset the number
2945          * of msix vectors variable to zero.  The only way to recover
2946          * is to unload/reload the driver and hope that the system has
2947          * been able to recover some MSIX vector resources.
2948          */
2949         if (!adapter->num_msix_vectors)
2950                 return -ENOMEM;
2951
2952         /* disallow open during test */
2953         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2954                 return -EBUSY;
2955
2956         if (hw->adapter_stopped) {
2957                 ixgbevf_reset(adapter);
2958                 /* if the adapter is still stopped, the PF isn't up and
2959                  * the VF can't start. */
2960                 if (hw->adapter_stopped) {
2961                         err = IXGBE_ERR_MBX;
2962                         pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
2964                         goto err_setup_reset;
2965                 }
2966         }
2967
2968         /* allocate transmit descriptors */
2969         err = ixgbevf_setup_all_tx_resources(adapter);
2970         if (err)
2971                 goto err_setup_tx;
2972
2973         /* allocate receive descriptors */
2974         err = ixgbevf_setup_all_rx_resources(adapter);
2975         if (err)
2976                 goto err_setup_rx;
2977
2978         ixgbevf_configure(adapter);
2979
2980         /*
2981          * Map the Tx/Rx rings to the vectors we were allotted.
2982          * Since request_irq will be called in this function,
2983          * map_rings must be called *before* up_complete.
2984          */
2985         ixgbevf_map_rings_to_vectors(adapter);
2986
2987         ixgbevf_up_complete(adapter);
2988
2989         /* clear any pending interrupts, may auto mask */
2990         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2991         err = ixgbevf_request_irq(adapter);
2992         if (err)
2993                 goto err_req_irq;
2994
2995         ixgbevf_irq_enable(adapter);
2996
2997         return 0;
2998
2999 err_req_irq:
3000         ixgbevf_down(adapter);
3001 err_setup_rx:
3002         ixgbevf_free_all_rx_resources(adapter);
3003 err_setup_tx:
3004         ixgbevf_free_all_tx_resources(adapter);
3005         ixgbevf_reset(adapter);
3006
3007 err_setup_reset:
3008
3009         return err;
3010 }
3011
3012 /**
3013  * ixgbevf_close - Disables a network interface
3014  * @netdev: network interface device structure
3015  *
3016  * Returns 0, this is not allowed to fail
3017  *
3018  * The close entry point is called when an interface is de-activated
3019  * by the OS.  The hardware is still under the drivers control, but
3020  * by the OS.  The hardware is still under the driver's control, but
3021  * hardware, and all transmit and receive resources are freed.
3022  **/
3023 static int ixgbevf_close(struct net_device *netdev)
3024 {
3025         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3026
3027         ixgbevf_down(adapter);
3028         ixgbevf_free_irq(adapter);
3029
3030         ixgbevf_free_all_tx_resources(adapter);
3031         ixgbevf_free_all_rx_resources(adapter);
3032
3033         return 0;
3034 }
3035
3036 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3037 {
3038         struct net_device *dev = adapter->netdev;
3039
3040         if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3041                 return;
3042
3043         adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3044
3045         /* if interface is down do nothing */
3046         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3047             test_bit(__IXGBEVF_RESETTING, &adapter->state))
3048                 return;
3049
3050         /* Hardware has to reinitialize queues and interrupts to
3051          * match packet buffer alignment. Unfortunately, the
3052          * hardware is not flexible enough to do this dynamically.
3053          */
3054         if (netif_running(dev))
3055                 ixgbevf_close(dev);
3056
3057         ixgbevf_clear_interrupt_scheme(adapter);
3058         ixgbevf_init_interrupt_scheme(adapter);
3059
3060         if (netif_running(dev))
3061                 ixgbevf_open(dev);
3062 }
3063
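/* A context descriptor carries no packet data; it programs the offload
 * parameters (MAC/IP header lengths, VLAN tag, L4 type, MSS) that the
 * following data descriptors reference by index.
 */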
3064 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3065                                 u32 vlan_macip_lens, u32 type_tucmd,
3066                                 u32 mss_l4len_idx)
3067 {
3068         struct ixgbe_adv_tx_context_desc *context_desc;
3069         u16 i = tx_ring->next_to_use;
3070
3071         context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3072
3073         i++;
3074         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3075
3076         /* set bits to identify this as an advanced context descriptor */
3077         type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3078
3079         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
3080         context_desc->seqnum_seed       = 0;
3081         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
3082         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
3083 }
3084
3085 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3086                        struct ixgbevf_tx_buffer *first,
3087                        u8 *hdr_len)
3088 {
3089         struct sk_buff *skb = first->skb;
3090         u32 vlan_macip_lens, type_tucmd;
3091         u32 mss_l4len_idx, l4len;
3092         int err;
3093
3094         if (skb->ip_summed != CHECKSUM_PARTIAL)
3095                 return 0;
3096
3097         if (!skb_is_gso(skb))
3098                 return 0;
3099
3100         err = skb_cow_head(skb, 0);
3101         if (err < 0)
3102                 return err;
3103
3104         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3105         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3106
3107         if (skb->protocol == htons(ETH_P_IP)) {
3108                 struct iphdr *iph = ip_hdr(skb);
3109                 iph->tot_len = 0;
3110                 iph->check = 0;
3111                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3112                                                          iph->daddr, 0,
3113                                                          IPPROTO_TCP,
3114                                                          0);
3115                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3116                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3117                                    IXGBE_TX_FLAGS_CSUM |
3118                                    IXGBE_TX_FLAGS_IPV4;
3119         } else if (skb_is_gso_v6(skb)) {
3120                 ipv6_hdr(skb)->payload_len = 0;
3121                 tcp_hdr(skb)->check =
3122                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3123                                      &ipv6_hdr(skb)->daddr,
3124                                      0, IPPROTO_TCP, 0);
3125                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3126                                    IXGBE_TX_FLAGS_CSUM;
3127         }
3128
3129         /* compute header lengths */
3130         l4len = tcp_hdrlen(skb);
3132         *hdr_len = skb_transport_offset(skb) + l4len;
3133
3134         /* update gso size and bytecount with header size */
3135         first->gso_segs = skb_shinfo(skb)->gso_segs;
3136         first->bytecount += (first->gso_segs - 1) * *hdr_len;
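             /* Illustrative numbers: with 14-byte Ethernet, 20-byte IP and 20-byte
              * TCP headers, *hdr_len is 54; an skb that segments into 4 frames puts
              * 3 extra header copies on the wire, so 3 * 54 bytes are added to the
              * byte statistics here.
              */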
3137
3138         /* mss_l4len_id: use 1 as index for TSO */
3139         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3140         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3141         mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3142
3143         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3144         vlan_macip_lens = skb_network_header_len(skb);
3145         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3146         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3147
3148         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3149                             type_tucmd, mss_l4len_idx);
3150
3151         return 1;
3152 }
3153
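/* For non-TSO packets that still need checksum insertion, a context
 * descriptor is emitted with only the header lengths and L4 protocol type
 * filled in; the MSS field stays zero since no segmentation is requested.
 */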
3154 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3155                             struct ixgbevf_tx_buffer *first)
3156 {
3157         struct sk_buff *skb = first->skb;
3158         u32 vlan_macip_lens = 0;
3159         u32 mss_l4len_idx = 0;
3160         u32 type_tucmd = 0;
3161
3162         if (skb->ip_summed == CHECKSUM_PARTIAL) {
3163                 u8 l4_hdr = 0;
3164                 switch (skb->protocol) {
3165                 case htons(ETH_P_IP):
3166                         vlan_macip_lens |= skb_network_header_len(skb);
3167                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3168                         l4_hdr = ip_hdr(skb)->protocol;
3169                         break;
3170                 case htons(ETH_P_IPV6):
3171                         vlan_macip_lens |= skb_network_header_len(skb);
3172                         l4_hdr = ipv6_hdr(skb)->nexthdr;
3173                         break;
3174                 default:
3175                         if (unlikely(net_ratelimit())) {
3176                                 dev_warn(tx_ring->dev,
3177                                          "partial checksum but proto=%x!\n",
3178                                          first->protocol);
3179                         }
3180                         break;
3181                 }
3182
3183                 switch (l4_hdr) {
3184                 case IPPROTO_TCP:
3185                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3186                         mss_l4len_idx = tcp_hdrlen(skb) <<
3187                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3188                         break;
3189                 case IPPROTO_SCTP:
3190                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3191                         mss_l4len_idx = sizeof(struct sctphdr) <<
3192                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3193                         break;
3194                 case IPPROTO_UDP:
3195                         mss_l4len_idx = sizeof(struct udphdr) <<
3196                                         IXGBE_ADVTXD_L4LEN_SHIFT;
3197                         break;
3198                 default:
3199                         if (unlikely(net_ratelimit())) {
3200                                 dev_warn(tx_ring->dev,
3201                                          "partial checksum but l4 proto=%x!\n",
3202                                          l4_hdr);
3203                         }
3204                         break;
3205                 }
3206
3207                 /* update TX checksum flag */
3208                 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3209         }
3210
3211         /* vlan_macip_lens: MACLEN, VLAN tag */
3212         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3213         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3214
3215         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3216                             type_tucmd, mss_l4len_idx);
3217 }
3218
3219 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3220 {
3221         /* set type for advanced descriptor with frame checksum insertion */
3222         __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3223                                       IXGBE_ADVTXD_DCMD_IFCS |
3224                                       IXGBE_ADVTXD_DCMD_DEXT);
3225
3226         /* set HW vlan bit if vlan is present */
3227         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3228                 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3229
3230         /* set segmentation enable bits for TSO/FSO */
3231         if (tx_flags & IXGBE_TX_FLAGS_TSO)
3232                 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3233
3234         return cmd_type;
3235 }
3236
3237 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3238                                      u32 tx_flags, unsigned int paylen)
3239 {
3240         __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3241
3242         /* enable L4 checksum for TSO and TX checksum offload */
3243         if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3244                 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3245
3246         /* enable IPv4 checksum for TSO */
3247         if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3248                 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3249
3250         /* use index 1 context for TSO/FSO/FCOE */
3251         if (tx_flags & IXGBE_TX_FLAGS_TSO)
3252                 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
3253
3254         /* Check Context must be set if Tx switch is enabled, which it
3255          * always is when virtual functions are running
3256          */
3257         olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3258
3259         tx_desc->read.olinfo_status = olinfo_status;
3260 }
3261
3262 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3263                            struct ixgbevf_tx_buffer *first,
3264                            const u8 hdr_len)
3265 {
3266         dma_addr_t dma;
3267         struct sk_buff *skb = first->skb;
3268         struct ixgbevf_tx_buffer *tx_buffer;
3269         union ixgbe_adv_tx_desc *tx_desc;
3270         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3271         unsigned int data_len = skb->data_len;
3272         unsigned int size = skb_headlen(skb);
3273         unsigned int paylen = skb->len - hdr_len;
3274         u32 tx_flags = first->tx_flags;
3275         __le32 cmd_type;
3276         u16 i = tx_ring->next_to_use;
3277
3278         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3279
3280         ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3281         cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3282
3283         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3284         if (dma_mapping_error(tx_ring->dev, dma))
3285                 goto dma_error;
3286
3287         /* record length, and DMA address */
3288         dma_unmap_len_set(first, len, size);
3289         dma_unmap_addr_set(first, dma, dma);
3290
3291         tx_desc->read.buffer_addr = cpu_to_le64(dma);
3292
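             /* Walk the linear head and then each page fragment.  A buffer larger
              * than IXGBE_MAX_DATA_PER_TXD is split across several data
              * descriptors, but the unmap length recorded above (and per fragment
              * below) covers the whole mapping.
              */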
3293         for (;;) {
3294                 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3295                         tx_desc->read.cmd_type_len =
3296                                 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3297
3298                         i++;
3299                         tx_desc++;
3300                         if (i == tx_ring->count) {
3301                                 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3302                                 i = 0;
3303                         }
3304
3305                         dma += IXGBE_MAX_DATA_PER_TXD;
3306                         size -= IXGBE_MAX_DATA_PER_TXD;
3307
3308                         tx_desc->read.buffer_addr = cpu_to_le64(dma);
3309                         tx_desc->read.olinfo_status = 0;
3310                 }
3311
3312                 if (likely(!data_len))
3313                         break;
3314
3315                 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3316
3317                 i++;
3318                 tx_desc++;
3319                 if (i == tx_ring->count) {
3320                         tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3321                         i = 0;
3322                 }
3323
3324                 size = skb_frag_size(frag);
3325                 data_len -= size;
3326
3327                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3328                                        DMA_TO_DEVICE);
3329                 if (dma_mapping_error(tx_ring->dev, dma))
3330                         goto dma_error;
3331
3332                 tx_buffer = &tx_ring->tx_buffer_info[i];
3333                 dma_unmap_len_set(tx_buffer, len, size);
3334                 dma_unmap_addr_set(tx_buffer, dma, dma);
3335
3336                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3337                 tx_desc->read.olinfo_status = 0;
3338
3339                 frag++;
3340         }
3341
3342         /* write last descriptor with RS and EOP bits */
3343         cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3344         tx_desc->read.cmd_type_len = cmd_type;
3345
3346         /* set the timestamp */
3347         first->time_stamp = jiffies;
3348
3349         /* Force memory writes to complete before letting h/w know there
3350          * are new descriptors to fetch.  (Only applicable for weak-ordered
3351          * memory model archs, such as IA-64).
3352          *
3353          * We also need this memory barrier (wmb) to make certain all of the
3354          * status bits have been updated before next_to_watch is written.
3355          */
3356         wmb();
3357
3358         /* set next_to_watch value indicating a packet is present */
3359         first->next_to_watch = tx_desc;
3360
3361         i++;
3362         if (i == tx_ring->count)
3363                 i = 0;
3364
3365         tx_ring->next_to_use = i;
3366
3367         /* notify HW of packet */
3368         ixgbevf_write_tail(tx_ring, i);
3369
3370         return;
3371 dma_error:
3372         dev_err(tx_ring->dev, "TX DMA map failed\n");
3373
3374         /* clear dma mappings for failed tx_buffer_info map */
3375         for (;;) {
3376                 tx_buffer = &tx_ring->tx_buffer_info[i];
3377                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3378                 if (tx_buffer == first)
3379                         break;
3380                 if (i == 0)
3381                         i = tx_ring->count;
3382                 i--;
3383         }
3384
3385         tx_ring->next_to_use = i;
3386 }
3387
3388 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3389 {
3390         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3391         /* Herbert's original patch had:
3392          *  smp_mb__after_netif_stop_queue();
3393          * but since that doesn't exist yet, just open code it. */
3394         smp_mb();
3395
3396         /* We need to check again in case another CPU has just
3397          * made room available. */
3398         if (likely(ixgbevf_desc_unused(tx_ring) < size))
3399                 return -EBUSY;
3400
3401         /* A reprieve! - use start_queue because it doesn't call schedule */
3402         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3403         ++tx_ring->tx_stats.restart_queue;
3404
3405         return 0;
3406 }
3407
3408 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3409 {
3410         if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3411                 return 0;
3412         return __ixgbevf_maybe_stop_tx(tx_ring, size);
3413 }
3414
3415 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3416 {
3417         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3418         struct ixgbevf_tx_buffer *first;
3419         struct ixgbevf_ring *tx_ring;
3420         int tso;
3421         u32 tx_flags = 0;
3422         u16 count = TXD_USE_COUNT(skb_headlen(skb));
3423 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3424         unsigned short f;
3425 #endif
3426         u8 hdr_len = 0;
3427         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3428
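             /* Destinations in the reserved link-local block (01:80:c2:00:00:0x,
              * used for pause frames, LLDP, BPDUs and the like) are not something
              * a VF should be transmitting, so such frames are silently consumed.
              */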
3429         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3430                 dev_kfree_skb(skb);
3431                 return NETDEV_TX_OK;
3432         }
3433
3434         tx_ring = adapter->tx_ring[skb->queue_mapping];
3435
3436         /*
3437          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3438          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3439          *       + 2 desc gap to keep tail from touching head,
3440          *       + 1 desc for context descriptor,
3441          * otherwise try next time
3442          */
3443 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3444         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3445                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3446 #else
3447         count += skb_shinfo(skb)->nr_frags;
3448 #endif
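             /* Worked example (assuming IXGBE_MAX_DATA_PER_TXD is 16K): a 60KB
              * fragment needs DIV_ROUND_UP(61440, 16384) = 4 data descriptors;
              * the "+ 3" below covers the context descriptor plus the 2-descriptor
              * gap described above.
              */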
3449         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3450                 tx_ring->tx_stats.tx_busy++;
3451                 return NETDEV_TX_BUSY;
3452         }
3453
3454         /* record the location of the first descriptor for this packet */
3455         first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3456         first->skb = skb;
3457         first->bytecount = skb->len;
3458         first->gso_segs = 1;
3459
3460         if (vlan_tx_tag_present(skb)) {
3461                 tx_flags |= vlan_tx_tag_get(skb);
3462                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3463                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3464         }
3465
3466         /* record initial flags and protocol */
3467         first->tx_flags = tx_flags;
3468         first->protocol = vlan_get_protocol(skb);
3469
3470         tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3471         if (tso < 0)
3472                 goto out_drop;
3473         else if (!tso)
3474                 ixgbevf_tx_csum(tx_ring, first);
3475
3476         ixgbevf_tx_map(tx_ring, first, hdr_len);
3477
3478         ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3479
3480         return NETDEV_TX_OK;
3481
3482 out_drop:
3483         dev_kfree_skb_any(first->skb);
3484         first->skb = NULL;
3485
3486         return NETDEV_TX_OK;
3487 }
3488
3489 /**
3490  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3491  * @netdev: network interface device structure
3492  * @p: pointer to an address structure
3493  *
3494  * Returns 0 on success, negative on failure
3495  **/
3496 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3497 {
3498         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3499         struct ixgbe_hw *hw = &adapter->hw;
3500         struct sockaddr *addr = p;
3501
3502         if (!is_valid_ether_addr(addr->sa_data))
3503                 return -EADDRNOTAVAIL;
3504
3505         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3506         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3507
3508         spin_lock_bh(&adapter->mbx_lock);
3509
3510         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3511
3512         spin_unlock_bh(&adapter->mbx_lock);
3513
3514         return 0;
3515 }
3516
3517 /**
3518  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3519  * @netdev: network interface device structure
3520  * @new_mtu: new value for maximum frame size
3521  *
3522  * Returns 0 on success, negative on failure
3523  **/
3524 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3525 {
3526         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3527         struct ixgbe_hw *hw = &adapter->hw;
3528         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3529         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3530
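             /* The frame size limit depends on what was negotiated with the PF:
              * mailbox API 1.1+ (and any MAC newer than the 82599 VF) allows jumbo
              * frames, otherwise the VF is held to a standard VLAN-sized frame.
              */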
3531         switch (adapter->hw.api_version) {
3532         case ixgbe_mbox_api_11:
3533                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3534                 break;
3535         default:
3536                 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3537                         max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3538                 break;
3539         }
3540
3541         /* MTU < 68 is an error and causes problems on some kernels */
3542         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3543                 return -EINVAL;
3544
3545         hw_dbg(hw, "changing MTU from %d to %d\n",
3546                netdev->mtu, new_mtu);
3547         /* must set new MTU before calling down or up */
3548         netdev->mtu = new_mtu;
3549
3550         /* notify the PF of our intent to use this size of frame */
3551         ixgbevf_rlpml_set_vf(hw, max_frame);
3552
3553         return 0;
3554 }
3555
3556 #ifdef CONFIG_NET_POLL_CONTROLLER
3557 /* Polling 'interrupt' - used by things like netconsole to send skbs
3558  * without having to re-enable interrupts. It's not called while
3559  * the interrupt routine is executing.
3560  */
3561 static void ixgbevf_netpoll(struct net_device *netdev)
3562 {
3563         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3564         int i;
3565
3566         /* if interface is down do nothing */
3567         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3568                 return;
3569         for (i = 0; i < adapter->num_rx_queues; i++)
3570                 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3571 }
3572 #endif /* CONFIG_NET_POLL_CONTROLLER */
3573
3574 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3575 {
3576         struct net_device *netdev = pci_get_drvdata(pdev);
3577         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3578 #ifdef CONFIG_PM
3579         int retval = 0;
3580 #endif
3581
3582         netif_device_detach(netdev);
3583
3584         if (netif_running(netdev)) {
3585                 rtnl_lock();
3586                 ixgbevf_down(adapter);
3587                 ixgbevf_free_irq(adapter);
3588                 ixgbevf_free_all_tx_resources(adapter);
3589                 ixgbevf_free_all_rx_resources(adapter);
3590                 rtnl_unlock();
3591         }
3592
3593         ixgbevf_clear_interrupt_scheme(adapter);
3594
3595 #ifdef CONFIG_PM
3596         retval = pci_save_state(pdev);
3597         if (retval)
3598                 return retval;
3599
3600 #endif
3601         if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3602                 pci_disable_device(pdev);
3603
3604         return 0;
3605 }
3606
3607 #ifdef CONFIG_PM
3608 static int ixgbevf_resume(struct pci_dev *pdev)
3609 {
3610         struct net_device *netdev = pci_get_drvdata(pdev);
3611         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3612         int err;
3613
3614         pci_restore_state(pdev);
3615         /*
3616          * pci_restore_state clears dev->state_saved so call
3617          * pci_save_state to restore it.
3618          */
3619         pci_save_state(pdev);
3620
3621         err = pci_enable_device_mem(pdev);
3622         if (err) {
3623                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3624                 return err;
3625         }
3626         smp_mb__before_atomic();
3627         clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3628         pci_set_master(pdev);
3629
3630         ixgbevf_reset(adapter);
3631
3632         rtnl_lock();
3633         err = ixgbevf_init_interrupt_scheme(adapter);
3634         rtnl_unlock();
3635         if (err) {
3636                 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3637                 return err;
3638         }
3639
3640         if (netif_running(netdev)) {
3641                 err = ixgbevf_open(netdev);
3642                 if (err)
3643                         return err;
3644         }
3645
3646         netif_device_attach(netdev);
3647
3648         return err;
3649 }
3650
3651 #endif /* CONFIG_PM */
3652 static void ixgbevf_shutdown(struct pci_dev *pdev)
3653 {
3654         ixgbevf_suspend(pdev, PMSG_SUSPEND);
3655 }
3656
3657 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3658                                                 struct rtnl_link_stats64 *stats)
3659 {
3660         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3661         unsigned int start;
3662         u64 bytes, packets;
3663         const struct ixgbevf_ring *ring;
3664         int i;
3665
3666         ixgbevf_update_stats(adapter);
3667
3668         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3669
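             /* Per-ring counters are sampled inside u64_stats_fetch_begin_irq()/
              * _retry_irq() loops so that 64-bit values read consistently even on
              * 32-bit hosts where the writer updates them in two halves.
              */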
3670         for (i = 0; i < adapter->num_rx_queues; i++) {
3671                 ring = adapter->rx_ring[i];
3672                 do {
3673                         start = u64_stats_fetch_begin_irq(&ring->syncp);
3674                         bytes = ring->stats.bytes;
3675                         packets = ring->stats.packets;
3676                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3677                 stats->rx_bytes += bytes;
3678                 stats->rx_packets += packets;
3679         }
3680
3681         for (i = 0; i < adapter->num_tx_queues; i++) {
3682                 ring = adapter->tx_ring[i];
3683                 do {
3684                         start = u64_stats_fetch_begin_irq(&ring->syncp);
3685                         bytes = ring->stats.bytes;
3686                         packets = ring->stats.packets;
3687                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3688                 stats->tx_bytes += bytes;
3689                 stats->tx_packets += packets;
3690         }
3691
3692         return stats;
3693 }
3694
3695 static const struct net_device_ops ixgbevf_netdev_ops = {
3696         .ndo_open               = ixgbevf_open,
3697         .ndo_stop               = ixgbevf_close,
3698         .ndo_start_xmit         = ixgbevf_xmit_frame,
3699         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3700         .ndo_get_stats64        = ixgbevf_get_stats,
3701         .ndo_validate_addr      = eth_validate_addr,
3702         .ndo_set_mac_address    = ixgbevf_set_mac,
3703         .ndo_change_mtu         = ixgbevf_change_mtu,
3704         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3705         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3706         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3707 #ifdef CONFIG_NET_RX_BUSY_POLL
3708         .ndo_busy_poll          = ixgbevf_busy_poll_recv,
3709 #endif
3710 #ifdef CONFIG_NET_POLL_CONTROLLER
3711         .ndo_poll_controller    = ixgbevf_netpoll,
3712 #endif
3713 };
3714
3715 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3716 {
3717         dev->netdev_ops = &ixgbevf_netdev_ops;
3718         ixgbevf_set_ethtool_ops(dev);
3719         dev->watchdog_timeo = 5 * HZ;
3720 }
3721
3722 /**
3723  * ixgbevf_probe - Device Initialization Routine
3724  * @pdev: PCI device information struct
3725  * @ent: entry in ixgbevf_pci_tbl
3726  *
3727  * Returns 0 on success, negative on failure
3728  *
3729  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3730  * The OS initialization, configuring of the adapter private structure,
3731  * and a hardware reset occur.
3732  **/
3733 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3734 {
3735         struct net_device *netdev;
3736         struct ixgbevf_adapter *adapter = NULL;
3737         struct ixgbe_hw *hw = NULL;
3738         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3739         int err, pci_using_dac;
             bool disable_dev = false;
3740
3741         err = pci_enable_device(pdev);
3742         if (err)
3743                 return err;
3744
3745         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3746                 pci_using_dac = 1;
3747         } else {
3748                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3749                 if (err) {
3750                         dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
3752                         goto err_dma;
3753                 }
3754                 pci_using_dac = 0;
3755         }
3756
3757         err = pci_request_regions(pdev, ixgbevf_driver_name);
3758         if (err) {
3759                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3760                 goto err_pci_reg;
3761         }
3762
3763         pci_set_master(pdev);
3764
3765         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3766                                    MAX_TX_QUEUES);
3767         if (!netdev) {
3768                 err = -ENOMEM;
3769                 goto err_alloc_etherdev;
3770         }
3771
3772         SET_NETDEV_DEV(netdev, &pdev->dev);
3773
3774         pci_set_drvdata(pdev, netdev);
3775         adapter = netdev_priv(netdev);
3776
3777         adapter->netdev = netdev;
3778         adapter->pdev = pdev;
3779         hw = &adapter->hw;
3780         hw->back = adapter;
3781         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3782
3783         /*
3784          * call save state here in standalone driver because it relies on
3785          * adapter struct to exist, and needs to call netdev_priv
3786          */
3787         pci_save_state(pdev);
3788
3789         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3790                               pci_resource_len(pdev, 0));
3791         adapter->io_addr = hw->hw_addr;
3792         if (!hw->hw_addr) {
3793                 err = -EIO;
3794                 goto err_ioremap;
3795         }
3796
3797         ixgbevf_assign_netdev_ops(netdev);
3798
3799         /* Setup hw api */
3800         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3801         hw->mac.type  = ii->mac;
3802
3803         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3804                sizeof(struct ixgbe_mbx_operations));
3805
3806         /* setup the private structure */
3807         err = ixgbevf_sw_init(adapter);
3808         if (err)
3809                 goto err_sw_init;
3810
3811         /* The HW MAC address was set and/or determined in sw_init */
3812         if (!is_valid_ether_addr(netdev->dev_addr)) {
3813                 pr_err("invalid MAC address\n");
3814                 err = -EIO;
3815                 goto err_sw_init;
3816         }
3817
3818         netdev->hw_features = NETIF_F_SG |
3819                            NETIF_F_IP_CSUM |
3820                            NETIF_F_IPV6_CSUM |
3821                            NETIF_F_TSO |
3822                            NETIF_F_TSO6 |
3823                            NETIF_F_RXCSUM;
3824
3825         netdev->features = netdev->hw_features |
3826                            NETIF_F_HW_VLAN_CTAG_TX |
3827                            NETIF_F_HW_VLAN_CTAG_RX |
3828                            NETIF_F_HW_VLAN_CTAG_FILTER;
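             /* Offloads listed in hw_features can be toggled from ethtool; the
              * VLAN tag offloads are added only to netdev->features and therefore
              * stay enabled.
              */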
3829
3830         netdev->vlan_features |= NETIF_F_TSO;
3831         netdev->vlan_features |= NETIF_F_TSO6;
3832         netdev->vlan_features |= NETIF_F_IP_CSUM;
3833         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3834         netdev->vlan_features |= NETIF_F_SG;
3835
3836         if (pci_using_dac)
3837                 netdev->features |= NETIF_F_HIGHDMA;
3838
3839         netdev->priv_flags |= IFF_UNICAST_FLT;
3840
3841         init_timer(&adapter->watchdog_timer);
3842         adapter->watchdog_timer.function = ixgbevf_watchdog;
3843         adapter->watchdog_timer.data = (unsigned long)adapter;
3844
3845         if (IXGBE_REMOVED(hw->hw_addr)) {
3846                 err = -EIO;
3847                 goto err_sw_init;
3848         }
3849         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3850         INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3851         set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
3852
3853         err = ixgbevf_init_interrupt_scheme(adapter);
3854         if (err)
3855                 goto err_sw_init;
3856
3857         strcpy(netdev->name, "eth%d");
3858
3859         err = register_netdev(netdev);
3860         if (err)
3861                 goto err_register;
3862
3863         netif_carrier_off(netdev);
3864
3865         ixgbevf_init_last_counter_stats(adapter);
3866
3867         /* print the VF info */
3868         dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
3869         dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
3870
3871         switch (hw->mac.type) {
3872         case ixgbe_mac_X550_vf:
3873                 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
3874                 break;
3875         case ixgbe_mac_X540_vf:
3876                 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
3877                 break;
3878         case ixgbe_mac_82599_vf:
3879         default:
3880                 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
3881                 break;
3882         }
3883
3884         return 0;
3885
3886 err_register:
3887         ixgbevf_clear_interrupt_scheme(adapter);
3888 err_sw_init:
3889         ixgbevf_reset_interrupt_capability(adapter);
3890         iounmap(adapter->io_addr);
3891 err_ioremap:
             disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
3892         free_netdev(netdev);
3893 err_alloc_etherdev:
3894         pci_release_regions(pdev);
3895 err_pci_reg:
3896 err_dma:
3897         if (!adapter || disable_dev)
3898                 pci_disable_device(pdev);
3899         return err;
3900 }
3901
3902 /**
3903  * ixgbevf_remove - Device Removal Routine
3904  * @pdev: PCI device information struct
3905  *
3906  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3907  * that it should release a PCI device.  This could be caused by a
3908  * Hot-Plug event, or because the driver is going to be removed from
3909  * memory.
3910  **/
3911 static void ixgbevf_remove(struct pci_dev *pdev)
3912 {
3913         struct net_device *netdev = pci_get_drvdata(pdev);
3914         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3915
3916         set_bit(__IXGBEVF_REMOVING, &adapter->state);
3917
3918         del_timer_sync(&adapter->watchdog_timer);
3919
3920         cancel_work_sync(&adapter->reset_task);
3921         cancel_work_sync(&adapter->watchdog_task);
3922
3923         if (netdev->reg_state == NETREG_REGISTERED)
3924                 unregister_netdev(netdev);
3925
3926         ixgbevf_clear_interrupt_scheme(adapter);
3927         ixgbevf_reset_interrupt_capability(adapter);
3928
3929         iounmap(adapter->io_addr);
3930         pci_release_regions(pdev);
3931
3932         hw_dbg(&adapter->hw, "Remove complete\n");
3933
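             /* the adapter (and its state bits) is embedded in the netdev, so the
              * device must be disabled before free_netdev() releases that memory
              */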
3934         if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3935                 pci_disable_device(pdev);
3936
3937         free_netdev(netdev);
3938 }
3939
3940 /**
3941  * ixgbevf_io_error_detected - called when PCI error is detected
3942  * @pdev: Pointer to PCI device
3943  * @state: The current pci connection state
3944  *
3945  * This function is called after a PCI bus error affecting
3946  * this device has been detected.
3947  */
3948 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3949                                                   pci_channel_state_t state)
3950 {
3951         struct net_device *netdev = pci_get_drvdata(pdev);
3952         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3953
3954         if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
3955                 return PCI_ERS_RESULT_DISCONNECT;
3956
3957         rtnl_lock();
3958         netif_device_detach(netdev);
3959
3960         if (state == pci_channel_io_perm_failure) {
3961                 rtnl_unlock();
3962                 return PCI_ERS_RESULT_DISCONNECT;
3963         }
3964
3965         if (netif_running(netdev))
3966                 ixgbevf_down(adapter);
3967
3968         if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3969                 pci_disable_device(pdev);
3970         rtnl_unlock();
3971
3972         /* Request a slot reset. */
3973         return PCI_ERS_RESULT_NEED_RESET;
3974 }
3975
3976 /**
3977  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3978  * @pdev: Pointer to PCI device
3979  *
3980  * Restart the card from scratch, as if from a cold-boot. Implementation
3981  * resembles the first-half of the ixgbevf_resume routine.
3982  */
3983 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3984 {
3985         struct net_device *netdev = pci_get_drvdata(pdev);
3986         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3987
3988         if (pci_enable_device_mem(pdev)) {
3989                 dev_err(&pdev->dev,
3990                         "Cannot re-enable PCI device after reset.\n");
3991                 return PCI_ERS_RESULT_DISCONNECT;
3992         }
3993
3994         smp_mb__before_atomic();
3995         clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3996         pci_set_master(pdev);
3997
3998         ixgbevf_reset(adapter);
3999
4000         return PCI_ERS_RESULT_RECOVERED;
4001 }
4002
4003 /**
4004  * ixgbevf_io_resume - called when traffic can start flowing again.
4005  * @pdev: Pointer to PCI device
4006  *
4007  * This callback is called when the error recovery driver tells us that
4008  * it's OK to resume normal operation. Implementation resembles the
4009  * second-half of the ixgbevf_resume routine.
4010  */
4011 static void ixgbevf_io_resume(struct pci_dev *pdev)
4012 {
4013         struct net_device *netdev = pci_get_drvdata(pdev);
4014         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4015
4016         if (netif_running(netdev))
4017                 ixgbevf_up(adapter);
4018
4019         netif_device_attach(netdev);
4020 }
4021
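/* The AER core invokes these handlers in order: error_detected() freezes the
 * VF when a bus error is reported, slot_reset() re-enables and resets it after
 * the slot reset, and resume() restarts traffic once recovery has succeeded.
 */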
4022 /* PCI Error Recovery (ERS) */
4023 static const struct pci_error_handlers ixgbevf_err_handler = {
4024         .error_detected = ixgbevf_io_error_detected,
4025         .slot_reset = ixgbevf_io_slot_reset,
4026         .resume = ixgbevf_io_resume,
4027 };
4028
4029 static struct pci_driver ixgbevf_driver = {
4030         .name     = ixgbevf_driver_name,
4031         .id_table = ixgbevf_pci_tbl,
4032         .probe    = ixgbevf_probe,
4033         .remove   = ixgbevf_remove,
4034 #ifdef CONFIG_PM
4035         /* Power Management Hooks */
4036         .suspend  = ixgbevf_suspend,
4037         .resume   = ixgbevf_resume,
4038 #endif
4039         .shutdown = ixgbevf_shutdown,
4040         .err_handler = &ixgbevf_err_handler
4041 };
4042
4043 /**
4044  * ixgbevf_init_module - Driver Registration Routine
4045  *
4046  * ixgbevf_init_module is the first routine called when the driver is
4047  * loaded. All it does is register with the PCI subsystem.
4048  **/
4049 static int __init ixgbevf_init_module(void)
4050 {
4051         int ret;
4052         pr_info("%s - version %s\n", ixgbevf_driver_string,
4053                 ixgbevf_driver_version);
4054
4055         pr_info("%s\n", ixgbevf_copyright);
4056
4057         ret = pci_register_driver(&ixgbevf_driver);
4058         return ret;
4059 }
4060
4061 module_init(ixgbevf_init_module);
4062
4063 /**
4064  * ixgbevf_exit_module - Driver Exit Cleanup Routine
4065  *
4066  * ixgbevf_exit_module is called just before the driver is removed
4067  * from memory.
4068  **/
4069 static void __exit ixgbevf_exit_module(void)
4070 {
4071         pci_unregister_driver(&ixgbevf_driver);
4072 }
4073
4074 #ifdef DEBUG
4075 /**
4076  * ixgbevf_get_hw_dev_name - return device name string
4077  * used by hardware layer to print debugging information
4078  **/
4079 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4080 {
4081         struct ixgbevf_adapter *adapter = hw->back;
4082         return adapter->netdev->name;
4083 }
4084
4085 #endif
4086 module_exit(ixgbevf_exit_module);
4087
4088 /* ixgbevf_main.c */