[cascardo/linux.git] drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3   Intel 82599 Virtual Function driver
4   Copyright(c) 1999 - 2012 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28
29 /******************************************************************************
30  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
51 #include <linux/if.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
54
55 #include "ixgbevf.h"
56
57 const char ixgbevf_driver_name[] = "ixgbevf";
58 static const char ixgbevf_driver_string[] =
59         "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61 #define DRV_VERSION "2.12.1-k"
62 const char ixgbevf_driver_version[] = DRV_VERSION;
63 static char ixgbevf_copyright[] =
64         "Copyright (c) 2009 - 2012 Intel Corporation.";
65
66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67         [board_82599_vf] = &ixgbevf_82599_vf_info,
68         [board_X540_vf]  = &ixgbevf_X540_vf_info,
69 };
70
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72  *
73  * Wildcard entries (PCI_ANY_ID) should come last
74  * Last entry must be all 0s
75  *
76  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77  *   Class, Class Mask, private data (not used) }
78  */
79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82         /* required last entry */
83         {0, }
84 };
85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93 static int debug = -1;
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97 /* forward decls */
98 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101
102 static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
103                                            u32 val)
104 {
105         rx_ring->next_to_use = val;
106
107         /*
108          * Force memory writes to complete before letting h/w
109          * know there are new descriptors to fetch.  (Only
110          * applicable for weak-ordered memory model archs,
111          * such as IA-64).
112          */
113         wmb();
114         writel(val, rx_ring->tail);
115 }
116
117 /**
118  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
119  * @adapter: pointer to adapter struct
120  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
121  * @queue: queue to map the corresponding interrupt to
122  * @msix_vector: the vector to map to the corresponding queue
123  */
124 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
125                              u8 queue, u8 msix_vector)
126 {
127         u32 ivar, index;
128         struct ixgbe_hw *hw = &adapter->hw;
129         if (direction == -1) {
130                 /* other causes */
131                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
132                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
133                 ivar &= ~0xFF;
134                 ivar |= msix_vector;
135                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
136         } else {
137                 /* tx or rx causes */
138                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
139                 index = ((16 * (queue & 1)) + (8 * direction));
140                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
141                 ivar &= ~(0xFF << index);
142                 ivar |= (msix_vector << index);
143                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
144         }
145 }
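
/* Illustrative example (not from the upstream file): how the IVAR index
 * arithmetic above picks a byte lane.  Each VTIVAR register carries the
 * allocations for two queues, one byte per cause:
 *
 *   queue 3, Rx (direction 0): register VTIVAR(3 >> 1) = VTIVAR(1),
 *                              index = 16 * (3 & 1) + 8 * 0 = 16
 *   queue 3, Tx (direction 1): register VTIVAR(1),
 *                              index = 16 * (3 & 1) + 8 * 1 = 24
 *
 * so the vector number (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 23:16
 * or 31:24 of VTIVAR(1) respectively.
 */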
146
147 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
148                                                struct ixgbevf_tx_buffer
149                                                *tx_buffer_info)
150 {
151         if (tx_buffer_info->dma) {
152                 if (tx_buffer_info->mapped_as_page)
153                         dma_unmap_page(tx_ring->dev,
154                                        tx_buffer_info->dma,
155                                        tx_buffer_info->length,
156                                        DMA_TO_DEVICE);
157                 else
158                         dma_unmap_single(tx_ring->dev,
159                                          tx_buffer_info->dma,
160                                          tx_buffer_info->length,
161                                          DMA_TO_DEVICE);
162                 tx_buffer_info->dma = 0;
163         }
164         if (tx_buffer_info->skb) {
165                 dev_kfree_skb_any(tx_buffer_info->skb);
166                 tx_buffer_info->skb = NULL;
167         }
168         tx_buffer_info->time_stamp = 0;
169         /* tx_buffer_info must be completely set up in the transmit path */
170 }
171
172 #define IXGBE_MAX_TXD_PWR       14
173 #define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
174
175 /* Tx Descriptors needed, worst case */
176 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
177 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
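
/* Illustrative worked example (not from the upstream file) for the macros
 * above: IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4 descriptors for a
 * 60000 byte chunk.  DESC_NEEDED is the worst-case descriptor count for one
 * skb: one per possible page fragment plus a few spare for the head data
 * and context descriptor (with 4K pages MAX_SKB_FRAGS is typically 17,
 * giving DESC_NEEDED = 21).
 */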
178
179 static void ixgbevf_tx_timeout(struct net_device *netdev);
180
181 /**
182  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
183  * @q_vector: structure containing interrupt and ring information
184  * @tx_ring: tx ring to clean
185  **/
186 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
187                                  struct ixgbevf_ring *tx_ring)
188 {
189         struct ixgbevf_adapter *adapter = q_vector->adapter;
190         union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
191         struct ixgbevf_tx_buffer *tx_buffer_info;
192         unsigned int i, count = 0;
193         unsigned int total_bytes = 0, total_packets = 0;
194
195         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
196                 return true;
197
198         i = tx_ring->next_to_clean;
199         tx_buffer_info = &tx_ring->tx_buffer_info[i];
200         eop_desc = tx_buffer_info->next_to_watch;
201
202         do {
203                 bool cleaned = false;
204
205                 /* if next_to_watch is not set then there is no work pending */
206                 if (!eop_desc)
207                         break;
208
209                 /* prevent any other reads prior to eop_desc */
210                 read_barrier_depends();
211
212                 /* if DD is not set pending work has not been completed */
213                 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
214                         break;
215
216                 /* clear next_to_watch to prevent false hangs */
217                 tx_buffer_info->next_to_watch = NULL;
218
219                 for ( ; !cleaned; count++) {
220                         struct sk_buff *skb;
221                         tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
222                         cleaned = (tx_desc == eop_desc);
223                         skb = tx_buffer_info->skb;
224
225                         if (cleaned && skb) {
226                                 unsigned int segs, bytecount;
227
228                                 /* gso_segs is currently only valid for tcp */
229                                 segs = skb_shinfo(skb)->gso_segs ?: 1;
230                                 /* multiply data chunks by size of headers */
231                                 bytecount = ((segs - 1) * skb_headlen(skb)) +
232                                             skb->len;
233                                 total_packets += segs;
234                                 total_bytes += bytecount;
235                         }
236
237                         ixgbevf_unmap_and_free_tx_resource(tx_ring,
238                                                            tx_buffer_info);
239
240                         tx_desc->wb.status = 0;
241
242                         i++;
243                         if (i == tx_ring->count)
244                                 i = 0;
245
246                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
247                 }
248
249                 eop_desc = tx_buffer_info->next_to_watch;
250         } while (count < tx_ring->count);
251
252         tx_ring->next_to_clean = i;
253
254 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
255         if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
256                      (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
257                 /* Make sure that anybody stopping the queue after this
258                  * sees the new next_to_clean.
259                  */
260                 smp_mb();
261                 if (__netif_subqueue_stopped(tx_ring->netdev,
262                                              tx_ring->queue_index) &&
263                     !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264                         netif_wake_subqueue(tx_ring->netdev,
265                                             tx_ring->queue_index);
266                         ++adapter->restart_queue;
267                 }
268         }
269
270         u64_stats_update_begin(&tx_ring->syncp);
271         tx_ring->total_bytes += total_bytes;
272         tx_ring->total_packets += total_packets;
273         u64_stats_update_end(&tx_ring->syncp);
274         q_vector->tx.total_bytes += total_bytes;
275         q_vector->tx.total_packets += total_packets;
276
277         return count < tx_ring->count;
278 }
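
/* Illustrative example (not from the upstream file) of the gso accounting
 * in ixgbevf_clean_tx_irq(): a TSO skb split into segs = 3 segments with
 * skb_headlen(skb) = 66 bytes of headers and skb->len = 4434 is counted as
 *
 *   bytecount = (3 - 1) * 66 + 4434 = 4566
 *
 * i.e. roughly the on-wire bytes including the headers replicated into the
 * extra segments, which feeds the byte counters used for ITR tuning.
 */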
279
280 /**
281  * ixgbevf_receive_skb - Send a completed packet up the stack
282  * @q_vector: structure containing interrupt and ring information
283  * @skb: packet to send up
284  * @status: hardware indication of status of receive
285  * @rx_desc: rx descriptor
286  **/
287 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
288                                 struct sk_buff *skb, u8 status,
289                                 union ixgbe_adv_rx_desc *rx_desc)
290 {
291         struct ixgbevf_adapter *adapter = q_vector->adapter;
292         bool is_vlan = (status & IXGBE_RXD_STAT_VP);
293         u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
294
295         if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
296                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
297
298         if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
299                 napi_gro_receive(&q_vector->napi, skb);
300         else
301                 netif_rx(skb);
302 }
303
304 /**
305  * ixgbevf_rx_skb - Helper function to determine proper Rx method
306  * @q_vector: structure containing interrupt and ring information
307  * @skb: packet to send up
308  * @status: hardware indication of status of receive
309  * @rx_desc: rx descriptor
310  **/
311 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
312                            struct sk_buff *skb, u8 status,
313                            union ixgbe_adv_rx_desc *rx_desc)
314 {
315 #ifdef CONFIG_NET_RX_BUSY_POLL
316         skb_mark_napi_id(skb, &q_vector->napi);
317
318         if (ixgbevf_qv_busy_polling(q_vector)) {
319                 netif_receive_skb(skb);
320                 /* exit early if we busy polled */
321                 return;
322         }
323 #endif /* CONFIG_NET_RX_BUSY_POLL */
324
325         ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
326 }
327
328 /**
329  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
330  * @ring: pointer to Rx descriptor ring structure
331  * @status_err: hardware indication of status of receive
332  * @skb: skb currently being received and modified
333  **/
334 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
335                                        u32 status_err, struct sk_buff *skb)
336 {
337         skb_checksum_none_assert(skb);
338
339         /* Rx csum disabled */
340         if (!(ring->netdev->features & NETIF_F_RXCSUM))
341                 return;
342
343         /* if IP and error */
344         if ((status_err & IXGBE_RXD_STAT_IPCS) &&
345             (status_err & IXGBE_RXDADV_ERR_IPE)) {
346                 ring->hw_csum_rx_error++;
347                 return;
348         }
349
350         if (!(status_err & IXGBE_RXD_STAT_L4CS))
351                 return;
352
353         if (status_err & IXGBE_RXDADV_ERR_TCPE) {
354                 ring->hw_csum_rx_error++;
355                 return;
356         }
357
358         /* It must be a TCP or UDP packet with a valid checksum */
359         skb->ip_summed = CHECKSUM_UNNECESSARY;
360         ring->hw_csum_rx_good++;
361 }
362
363 /**
364  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
365  * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
366  **/
367 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
368                                      struct ixgbevf_ring *rx_ring,
369                                      int cleaned_count)
370 {
371         union ixgbe_adv_rx_desc *rx_desc;
372         struct ixgbevf_rx_buffer *bi;
373         unsigned int i = rx_ring->next_to_use;
374
375         while (cleaned_count--) {
376                 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
377                 bi = &rx_ring->rx_buffer_info[i];
378
379                 if (!bi->skb) {
380                         struct sk_buff *skb;
381
382                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
383                                                         rx_ring->rx_buf_len);
384                         if (!skb)
385                                 goto no_buffers;
386
387                         bi->skb = skb;
388
389                         bi->dma = dma_map_single(rx_ring->dev, skb->data,
390                                                  rx_ring->rx_buf_len,
391                                                  DMA_FROM_DEVICE);
392                         if (dma_mapping_error(rx_ring->dev, bi->dma)) {
393                                 dev_kfree_skb(skb);
394                                 bi->skb = NULL;
395                                 dev_err(rx_ring->dev, "Rx DMA map failed\n");
396                                 break;
397                         }
398                 }
399                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
400
401                 i++;
402                 if (i == rx_ring->count)
403                         i = 0;
404         }
405
406 no_buffers:
407         adapter->alloc_rx_buff_failed++;
408         if (rx_ring->next_to_use != i)
409                 ixgbevf_release_rx_desc(rx_ring, i);
410 }
411
412 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
413                                              u32 qmask)
414 {
415         struct ixgbe_hw *hw = &adapter->hw;
416
417         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
418 }
419
420 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
421                                 struct ixgbevf_ring *rx_ring,
422                                 int budget)
423 {
424         struct ixgbevf_adapter *adapter = q_vector->adapter;
425         union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
426         struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
427         struct sk_buff *skb;
428         unsigned int i;
429         u32 len, staterr;
430         int cleaned_count = 0;
431         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
432
433         i = rx_ring->next_to_clean;
434         rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
435         staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
436         rx_buffer_info = &rx_ring->rx_buffer_info[i];
437
438         while (staterr & IXGBE_RXD_STAT_DD) {
439                 if (!budget)
440                         break;
441                 budget--;
442
443                 rmb(); /* read descriptor and rx_buffer_info after status DD */
444                 len = le16_to_cpu(rx_desc->wb.upper.length);
445                 skb = rx_buffer_info->skb;
446                 prefetch(skb->data - NET_IP_ALIGN);
447                 rx_buffer_info->skb = NULL;
448
449                 if (rx_buffer_info->dma) {
450                         dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
451                                          rx_ring->rx_buf_len,
452                                          DMA_FROM_DEVICE);
453                         rx_buffer_info->dma = 0;
454                         skb_put(skb, len);
455                 }
456
457                 i++;
458                 if (i == rx_ring->count)
459                         i = 0;
460
461                 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
462                 prefetch(next_rxd);
463                 cleaned_count++;
464
465                 next_buffer = &rx_ring->rx_buffer_info[i];
466
467                 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
468                         skb->next = next_buffer->skb;
469                         IXGBE_CB(skb->next)->prev = skb;
470                         adapter->non_eop_descs++;
471                         goto next_desc;
472                 }
473
474                 /* we should not be chaining buffers; if we did, drop the skb */
475                 if (IXGBE_CB(skb)->prev) {
476                         do {
477                                 struct sk_buff *this = skb;
478                                 skb = IXGBE_CB(skb)->prev;
479                                 dev_kfree_skb(this);
480                         } while (skb);
481                         goto next_desc;
482                 }
483
484                 /* ERR_MASK will only have valid bits if EOP set */
485                 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
486                         dev_kfree_skb_irq(skb);
487                         goto next_desc;
488                 }
489
490                 ixgbevf_rx_checksum(rx_ring, staterr, skb);
491
492                 /* probably a little skewed due to removing CRC */
493                 total_rx_bytes += skb->len;
494                 total_rx_packets++;
495
496                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
497
498                 /* Workaround hardware that can't do proper VEPA multicast
499                  * source pruning.
500                  */
501                 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
502                     ether_addr_equal(adapter->netdev->dev_addr,
503                                      eth_hdr(skb)->h_source)) {
504                         dev_kfree_skb_irq(skb);
505                         goto next_desc;
506                 }
507
508                 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
509
510 next_desc:
511                 rx_desc->wb.upper.status_error = 0;
512
513                 /* return some buffers to hardware, one at a time is too slow */
514                 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
515                         ixgbevf_alloc_rx_buffers(adapter, rx_ring,
516                                                  cleaned_count);
517                         cleaned_count = 0;
518                 }
519
520                 /* use prefetched values */
521                 rx_desc = next_rxd;
522                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
523
524                 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
525         }
526
527         rx_ring->next_to_clean = i;
528         cleaned_count = ixgbevf_desc_unused(rx_ring);
529
530         if (cleaned_count)
531                 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
532
533         u64_stats_update_begin(&rx_ring->syncp);
534         rx_ring->total_packets += total_rx_packets;
535         rx_ring->total_bytes += total_rx_bytes;
536         u64_stats_update_end(&rx_ring->syncp);
537         q_vector->rx.total_packets += total_rx_packets;
538         q_vector->rx.total_bytes += total_rx_bytes;
539
540         return total_rx_packets;
541 }
542
543 /**
544  * ixgbevf_poll - NAPI polling callback
545  * @napi: napi struct with our devices info in it
546  * @budget: amount of work driver is allowed to do this pass, in packets
547  *
548  * This function will clean one or more rings associated with a
549  * q_vector.
550  **/
551 static int ixgbevf_poll(struct napi_struct *napi, int budget)
552 {
553         struct ixgbevf_q_vector *q_vector =
554                 container_of(napi, struct ixgbevf_q_vector, napi);
555         struct ixgbevf_adapter *adapter = q_vector->adapter;
556         struct ixgbevf_ring *ring;
557         int per_ring_budget;
558         bool clean_complete = true;
559
560         ixgbevf_for_each_ring(ring, q_vector->tx)
561                 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
562
563 #ifdef CONFIG_NET_RX_BUSY_POLL
564         if (!ixgbevf_qv_lock_napi(q_vector))
565                 return budget;
566 #endif
567
568         /* attempt to distribute budget to each queue fairly, but don't allow
569          * the budget to go below 1 because we'll exit polling */
570         if (q_vector->rx.count > 1)
571                 per_ring_budget = max(budget/q_vector->rx.count, 1);
572         else
573                 per_ring_budget = budget;
574
575         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
576         ixgbevf_for_each_ring(ring, q_vector->rx)
577                 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
578                                                         per_ring_budget)
579                                    < per_ring_budget);
580         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
581
582 #ifdef CONFIG_NET_RX_BUSY_POLL
583         ixgbevf_qv_unlock_napi(q_vector);
584 #endif
585
586         /* If all work not completed, return budget and keep polling */
587         if (!clean_complete)
588                 return budget;
589         /* all work done, exit the polling mode */
590         napi_complete(napi);
591         if (adapter->rx_itr_setting & 1)
592                 ixgbevf_set_itr(q_vector);
593         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
594                 ixgbevf_irq_enable_queues(adapter,
595                                           1 << q_vector->v_idx);
596
597         return 0;
598 }
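
/* Illustrative note (not from the upstream file) on the budget split in
 * ixgbevf_poll(): per_ring_budget is plain integer division clamped at 1.
 * With the default NAPI budget of 64 and three Rx rings on one vector,
 * per_ring_budget = max(64 / 3, 1) = 21, so each ring may clean at most
 * 21 packets before the vector reports itself done for this pass.
 */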
599
600 /**
601  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
602  * @q_vector: structure containing interrupt and ring information
603  */
604 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
605 {
606         struct ixgbevf_adapter *adapter = q_vector->adapter;
607         struct ixgbe_hw *hw = &adapter->hw;
608         int v_idx = q_vector->v_idx;
609         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
610
611         /*
612          * set the WDIS bit so this write does not clear the timer bits and
613          * cause an immediate assertion of the interrupt
614          */
615         itr_reg |= IXGBE_EITR_CNT_WDIS;
616
617         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
618 }
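
/* Illustrative note (not from the upstream file): q_vector->itr is kept in
 * EITR register units, where the interval in usecs is itr >> 2 (see
 * ixgbevf_update_itr() below).  Assuming the usual ixgbevf.h value
 * IXGBE_20K_ITR == 200, that setting programs an interval of
 * 200 >> 2 = 50 usecs, i.e. roughly 20000 interrupts per second on this
 * vector.
 */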
619
620 #ifdef CONFIG_NET_RX_BUSY_POLL
621 /* must be called with local_bh_disable()d */
622 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
623 {
624         struct ixgbevf_q_vector *q_vector =
625                         container_of(napi, struct ixgbevf_q_vector, napi);
626         struct ixgbevf_adapter *adapter = q_vector->adapter;
627         struct ixgbevf_ring  *ring;
628         int found = 0;
629
630         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
631                 return LL_FLUSH_FAILED;
632
633         if (!ixgbevf_qv_lock_poll(q_vector))
634                 return LL_FLUSH_BUSY;
635
636         ixgbevf_for_each_ring(ring, q_vector->rx) {
637                 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
638 #ifdef BP_EXTENDED_STATS
639                 if (found)
640                         ring->bp_cleaned += found;
641                 else
642                         ring->bp_misses++;
643 #endif
644                 if (found)
645                         break;
646         }
647
648         ixgbevf_qv_unlock_poll(q_vector);
649
650         return found;
651 }
652 #endif /* CONFIG_NET_RX_BUSY_POLL */
653
654 /**
655  * ixgbevf_configure_msix - Configure MSI-X hardware
656  * @adapter: board private structure
657  *
658  * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
659  * interrupts.
660  **/
661 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
662 {
663         struct ixgbevf_q_vector *q_vector;
664         int q_vectors, v_idx;
665
666         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
667         adapter->eims_enable_mask = 0;
668
669         /*
670          * Populate the IVAR table and set the ITR values to the
671          * corresponding register.
672          */
673         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
674                 struct ixgbevf_ring *ring;
675                 q_vector = adapter->q_vector[v_idx];
676
677                 ixgbevf_for_each_ring(ring, q_vector->rx)
678                         ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
679
680                 ixgbevf_for_each_ring(ring, q_vector->tx)
681                         ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
682
683                 if (q_vector->tx.ring && !q_vector->rx.ring) {
684                         /* tx only vector */
685                         if (adapter->tx_itr_setting == 1)
686                                 q_vector->itr = IXGBE_10K_ITR;
687                         else
688                                 q_vector->itr = adapter->tx_itr_setting;
689                 } else {
690                         /* rx or rx/tx vector */
691                         if (adapter->rx_itr_setting == 1)
692                                 q_vector->itr = IXGBE_20K_ITR;
693                         else
694                                 q_vector->itr = adapter->rx_itr_setting;
695                 }
696
697                 /* add q_vector eims value to global eims_enable_mask */
698                 adapter->eims_enable_mask |= 1 << v_idx;
699
700                 ixgbevf_write_eitr(q_vector);
701         }
702
703         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
704         /* setup eims_other and add value to global eims_enable_mask */
705         adapter->eims_other = 1 << v_idx;
706         adapter->eims_enable_mask |= adapter->eims_other;
707 }
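
/* Illustrative worked example (not from the upstream file): with two queue
 * vectors the loop above leaves v_idx == 2, so the queue vectors contribute
 * (1 << 0) | (1 << 1) = 0x3, the mailbox/link cause gets
 * eims_other = 1 << 2 = 0x4, and the combined eims_enable_mask later
 * written by ixgbevf_irq_enable() is 0x7.
 */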
708
709 enum latency_range {
710         lowest_latency = 0,
711         low_latency = 1,
712         bulk_latency = 2,
713         latency_invalid = 255
714 };
715
716 /**
717  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
718  * @q_vector: structure containing interrupt and ring information
719  * @ring_container: structure containing ring performance data
720  *
721  *      Stores a new ITR value based on packets and byte
722  *      counts during the last interrupt.  The advantage of per interrupt
723  *      computation is faster updates and more accurate ITR for the current
724  *      traffic pattern.  Constants in this function were computed
725  *      based on theoretical maximum wire speed and thresholds were set based
726  *      on testing data as well as attempting to minimize response time
727  *      while increasing bulk throughput.
728  **/
729 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
730                                struct ixgbevf_ring_container *ring_container)
731 {
732         int bytes = ring_container->total_bytes;
733         int packets = ring_container->total_packets;
734         u32 timepassed_us;
735         u64 bytes_perint;
736         u8 itr_setting = ring_container->itr;
737
738         if (packets == 0)
739                 return;
740
741         /* simple throttle rate management
742          *    0-20MB/s lowest (100000 ints/s)
743          *   20-100MB/s low   (20000 ints/s)
744          *  100-1249MB/s bulk (8000 ints/s)
745          */
746         /* what was last interrupt timeslice? */
747         timepassed_us = q_vector->itr >> 2;
748         bytes_perint = bytes / timepassed_us; /* bytes/usec */
749
750         switch (itr_setting) {
751         case lowest_latency:
752                 if (bytes_perint > 10)
753                         itr_setting = low_latency;
754                 break;
755         case low_latency:
756                 if (bytes_perint > 20)
757                         itr_setting = bulk_latency;
758                 else if (bytes_perint <= 10)
759                         itr_setting = lowest_latency;
760                 break;
761         case bulk_latency:
762                 if (bytes_perint <= 20)
763                         itr_setting = low_latency;
764                 break;
765         }
766
767         /* clear work counters since we have the values we need */
768         ring_container->total_bytes = 0;
769         ring_container->total_packets = 0;
770
771         /* write updated itr to ring container */
772         ring_container->itr = itr_setting;
773 }
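
/* Illustrative note (not from the upstream file): bytes_perint above is
 * bytes per usec, so the 10 and 20 thresholds correspond to roughly
 * 10 MB/s and 20 MB/s.  For example, at a 50 usec timeslice (the 20K
 * ints/s setting), receiving 3000 bytes in one interval gives
 * bytes_perint = 60, which moves a ring currently in low_latency up to
 * bulk_latency.
 */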
774
775 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
776 {
777         u32 new_itr = q_vector->itr;
778         u8 current_itr;
779
780         ixgbevf_update_itr(q_vector, &q_vector->tx);
781         ixgbevf_update_itr(q_vector, &q_vector->rx);
782
783         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
784
785         switch (current_itr) {
786         /* counts and packets in update_itr are dependent on these numbers */
787         case lowest_latency:
788                 new_itr = IXGBE_100K_ITR;
789                 break;
790         case low_latency:
791                 new_itr = IXGBE_20K_ITR;
792                 break;
793         case bulk_latency:
794         default:
795                 new_itr = IXGBE_8K_ITR;
796                 break;
797         }
798
799         if (new_itr != q_vector->itr) {
800                 /* do an exponential smoothing */
801                 new_itr = (10 * new_itr * q_vector->itr) /
802                           ((9 * new_itr) + q_vector->itr);
803
804                 /* save the algorithm value here */
805                 q_vector->itr = new_itr;
806
807                 ixgbevf_write_eitr(q_vector);
808         }
809 }
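
/* Illustrative worked example (not from the upstream file) of the smoothing
 * above: moving from q_vector->itr = 500 (a 125 usec interval) toward a
 * target new_itr = 200 gives
 *
 *   new_itr = (10 * 200 * 500) / ((9 * 200) + 500) = 1000000 / 2300 = 434
 *
 * so the programmed interval drifts toward the target over several updates
 * rather than jumping in a single step.
 */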
810
811 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
812 {
813         struct ixgbevf_adapter *adapter = data;
814         struct ixgbe_hw *hw = &adapter->hw;
815
816         hw->mac.get_link_status = 1;
817
818         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
819                 mod_timer(&adapter->watchdog_timer, jiffies);
820
821         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
822
823         return IRQ_HANDLED;
824 }
825
826 /**
827  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
828  * @irq: unused
829  * @data: pointer to our q_vector struct for this interrupt vector
830  **/
831 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
832 {
833         struct ixgbevf_q_vector *q_vector = data;
834
835         /* EIAM disabled interrupts (on this vector) for us */
836         if (q_vector->rx.ring || q_vector->tx.ring)
837                 napi_schedule(&q_vector->napi);
838
839         return IRQ_HANDLED;
840 }
841
842 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
843                                      int r_idx)
844 {
845         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
846
847         a->rx_ring[r_idx]->next = q_vector->rx.ring;
848         q_vector->rx.ring = a->rx_ring[r_idx];
849         q_vector->rx.count++;
850 }
851
852 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
853                                      int t_idx)
854 {
855         struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
856
857         a->tx_ring[t_idx]->next = q_vector->tx.ring;
858         q_vector->tx.ring = a->tx_ring[t_idx];
859         q_vector->tx.count++;
860 }
861
862 /**
863  * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
864  * @adapter: board private structure to initialize
865  *
866  * This function maps descriptor rings to the queue-specific vectors
867  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
868  * one vector per ring/queue, but on a constrained vector budget, we
869  * group the rings as "efficiently" as possible.  You would add new
870  * mapping configurations in here.
871  **/
872 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
873 {
874         int q_vectors;
875         int v_start = 0;
876         int rxr_idx = 0, txr_idx = 0;
877         int rxr_remaining = adapter->num_rx_queues;
878         int txr_remaining = adapter->num_tx_queues;
879         int i, j;
880         int rqpv, tqpv;
881         int err = 0;
882
883         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
884
885         /*
886          * The ideal configuration...
887          * We have enough vectors to map one per queue.
888          */
889         if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
890                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
891                         map_vector_to_rxq(adapter, v_start, rxr_idx);
892
893                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
894                         map_vector_to_txq(adapter, v_start, txr_idx);
895                 goto out;
896         }
897
898         /*
899          * If we don't have enough vectors for a 1-to-1
900          * mapping, we'll have to group them so there are
901          * multiple queues per vector.
902          */
903         /* Re-adjusting *qpv takes care of the remainder. */
904         for (i = v_start; i < q_vectors; i++) {
905                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
906                 for (j = 0; j < rqpv; j++) {
907                         map_vector_to_rxq(adapter, i, rxr_idx);
908                         rxr_idx++;
909                         rxr_remaining--;
910                 }
911         }
912         for (i = v_start; i < q_vectors; i++) {
913                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
914                 for (j = 0; j < tqpv; j++) {
915                         map_vector_to_txq(adapter, i, txr_idx);
916                         txr_idx++;
917                         txr_remaining--;
918                 }
919         }
920
921 out:
922         return err;
923 }
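
/* Illustrative worked example (not from the upstream file) for the grouped
 * mapping above: with 2 queue vectors and 4 Rx / 4 Tx queues,
 *
 *   vector 0: rqpv = DIV_ROUND_UP(4, 2) = 2 -> rx0, rx1 (and tx0, tx1)
 *   vector 1: rqpv = DIV_ROUND_UP(2, 1) = 2 -> rx2, rx3 (and tx2, tx3)
 *
 * so each vector ends up polling two Rx and two Tx rings.
 */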
924
925 /**
926  * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
927  * @adapter: board private structure
928  *
929  * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
930  * interrupts from the kernel.
931  **/
932 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
933 {
934         struct net_device *netdev = adapter->netdev;
935         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
936         int vector, err;
937         int ri = 0, ti = 0;
938
939         for (vector = 0; vector < q_vectors; vector++) {
940                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
941                 struct msix_entry *entry = &adapter->msix_entries[vector];
942
943                 if (q_vector->tx.ring && q_vector->rx.ring) {
944                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
945                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
946                         ti++;
947                 } else if (q_vector->rx.ring) {
948                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
949                                  "%s-%s-%d", netdev->name, "rx", ri++);
950                 } else if (q_vector->tx.ring) {
951                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
952                                  "%s-%s-%d", netdev->name, "tx", ti++);
953                 } else {
954                         /* skip this unused q_vector */
955                         continue;
956                 }
957                 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
958                                   q_vector->name, q_vector);
959                 if (err) {
960                         hw_dbg(&adapter->hw,
961                                "request_irq failed for MSIX interrupt "
962                                "Error: %d\n", err);
963                         goto free_queue_irqs;
964                 }
965         }
966
967         err = request_irq(adapter->msix_entries[vector].vector,
968                           &ixgbevf_msix_other, 0, netdev->name, adapter);
969         if (err) {
970                 hw_dbg(&adapter->hw,
971                        "request_irq for msix_other failed: %d\n", err);
972                 goto free_queue_irqs;
973         }
974
975         return 0;
976
977 free_queue_irqs:
978         while (vector) {
979                 vector--;
980                 free_irq(adapter->msix_entries[vector].vector,
981                          adapter->q_vector[vector]);
982         }
983         /* This failure is non-recoverable - it indicates the system is
984          * out of MSIX vector resources and the VF driver cannot run
985          * without them.  Set the number of msix vectors to zero
986          * indicating that not enough can be allocated.  The error
987          * will be returned to the user indicating device open failed.
988          * Any further attempts to force the driver to open will also
989          * fail.  The only way to recover is to unload the driver and
990          * reload it again.  If the system has recovered some MSIX
991          * vectors then it may succeed.
992          */
993         adapter->num_msix_vectors = 0;
994         return err;
995 }
996
997 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
998 {
999         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1000
1001         for (i = 0; i < q_vectors; i++) {
1002                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1003                 q_vector->rx.ring = NULL;
1004                 q_vector->tx.ring = NULL;
1005                 q_vector->rx.count = 0;
1006                 q_vector->tx.count = 0;
1007         }
1008 }
1009
1010 /**
1011  * ixgbevf_request_irq - initialize interrupts
1012  * @adapter: board private structure
1013  *
1014  * Attempts to configure interrupts using the best available
1015  * capabilities of the hardware and kernel.
1016  **/
1017 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1018 {
1019         int err = 0;
1020
1021         err = ixgbevf_request_msix_irqs(adapter);
1022
1023         if (err)
1024                 hw_dbg(&adapter->hw,
1025                        "request_irq failed, Error %d\n", err);
1026
1027         return err;
1028 }
1029
1030 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1031 {
1032         int i, q_vectors;
1033
1034         q_vectors = adapter->num_msix_vectors;
1035         i = q_vectors - 1;
1036
1037         free_irq(adapter->msix_entries[i].vector, adapter);
1038         i--;
1039
1040         for (; i >= 0; i--) {
1041                 /* free only the irqs that were actually requested */
1042                 if (!adapter->q_vector[i]->rx.ring &&
1043                     !adapter->q_vector[i]->tx.ring)
1044                         continue;
1045
1046                 free_irq(adapter->msix_entries[i].vector,
1047                          adapter->q_vector[i]);
1048         }
1049
1050         ixgbevf_reset_q_vectors(adapter);
1051 }
1052
1053 /**
1054  * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1055  * @adapter: board private structure
1056  **/
1057 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1058 {
1059         struct ixgbe_hw *hw = &adapter->hw;
1060         int i;
1061
1062         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1063         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1064         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1065
1066         IXGBE_WRITE_FLUSH(hw);
1067
1068         for (i = 0; i < adapter->num_msix_vectors; i++)
1069                 synchronize_irq(adapter->msix_entries[i].vector);
1070 }
1071
1072 /**
1073  * ixgbevf_irq_enable - Enable default interrupt generation settings
1074  * @adapter: board private structure
1075  **/
1076 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1077 {
1078         struct ixgbe_hw *hw = &adapter->hw;
1079
1080         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1081         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1082         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1083 }
1084
1085 /**
1086  * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1087  * @adapter: board private structure
1088  * @ring: structure containing ring specific data
1089  *
1090  * Configure the Tx descriptor ring after a reset.
1091  **/
1092 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1093                                       struct ixgbevf_ring *ring)
1094 {
1095         struct ixgbe_hw *hw = &adapter->hw;
1096         u64 tdba = ring->dma;
1097         int wait_loop = 10;
1098         u32 txdctl = IXGBE_TXDCTL_ENABLE;
1099         u8 reg_idx = ring->reg_idx;
1100
1101         /* disable queue to avoid issues while updating state */
1102         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1103         IXGBE_WRITE_FLUSH(hw);
1104
1105         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1106         IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1107         IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1108                         ring->count * sizeof(union ixgbe_adv_tx_desc));
1109
1110         /* disable head writeback */
1111         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1112         IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1113
1114         /* enable relaxed ordering */
1115         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1116                         (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1117                          IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1118
1119         /* reset head and tail pointers */
1120         IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1121         IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1122         ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
1123
1124         /* reset ntu and ntc to place SW in sync with hardware */
1125         ring->next_to_clean = 0;
1126         ring->next_to_use = 0;
1127
1128         /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1129          * to or less than the number of on-chip descriptors, which is
1130          * currently 40.
1131          */
1132         txdctl |= (8 << 16);    /* WTHRESH = 8 */
1133
1134         /* Setting PTHRESH to 32 improves performance */
1135         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
1136                   32;          /* PTHRESH = 32 */
1137
1138         IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1139
1140         /* poll to verify queue is enabled */
1141         do {
1142                 usleep_range(1000, 2000);
1143                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1144         }  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1145         if (!wait_loop)
1146                 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1147 }
1148
1149 /**
1150  * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1151  * @adapter: board private structure
1152  *
1153  * Configure the Tx unit of the MAC after a reset.
1154  **/
1155 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1156 {
1157         u32 i;
1158
1159         /* Setup the HW Tx Head and Tail descriptor pointers */
1160         for (i = 0; i < adapter->num_tx_queues; i++)
1161                 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1162 }
1163
1164 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1165
1166 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1167 {
1168         struct ixgbevf_ring *rx_ring;
1169         struct ixgbe_hw *hw = &adapter->hw;
1170         u32 srrctl;
1171
1172         rx_ring = adapter->rx_ring[index];
1173
1174         srrctl = IXGBE_SRRCTL_DROP_EN;
1175
1176         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1177
1178         srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1179                   IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1180
1181         IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1182 }
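
/* Illustrative note (not from the upstream file): the SRRCTL buffer size
 * field is in 1 KB units (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 as in
 * the ixgbevf defines).  For rx_buf_len = 2048 the value programmed above
 * is ALIGN(2048, 1024) >> 10 = 2, and an odd length such as 1522 is first
 * rounded up to 2048 and programmed the same way.
 */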
1183
1184 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1185 {
1186         struct ixgbe_hw *hw = &adapter->hw;
1187
1188         /* PSRTYPE must be initialized in 82599 */
1189         u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1190                       IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1191                       IXGBE_PSRTYPE_L2HDR;
1192
1193         if (adapter->num_rx_queues > 1)
1194                 psrtype |= 1 << 29;
1195
1196         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1197 }
1198
1199 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1200 {
1201         struct ixgbe_hw *hw = &adapter->hw;
1202         struct net_device *netdev = adapter->netdev;
1203         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1204         int i;
1205         u16 rx_buf_len;
1206
1207         /* notify the PF of our intent to use this size of frame */
1208         ixgbevf_rlpml_set_vf(hw, max_frame);
1209
1210         /* PF will allow an extra 4 bytes past the limit for VLAN tagged frames */
1211         max_frame += VLAN_HLEN;
1212
1213         /*
1214          * Allocate buffer sizes that fit well into 32K and
1215          * take into account max frame size of 9.5K
1216          */
1217         if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1218             (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1219                 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1220         else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1221                 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1222         else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1223                 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1224         else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1225                 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1226         else
1227                 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1228
1229         for (i = 0; i < adapter->num_rx_queues; i++)
1230                 adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
1231 }
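
/* Illustrative worked example (not from the upstream file): for a standard
 * 1500 byte MTU, max_frame is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * plus VLAN_HLEN = 1522, which on an X540 VF fits
 * MAXIMUM_ETHERNET_VLAN_SIZE and selects a 1522 byte buffer; a 9000 byte
 * jumbo MTU yields 9022 and falls through to the IXGBEVF_RXBUFFER_10K
 * bucket.
 */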
1232
1233 #define IXGBEVF_MAX_RX_DESC_POLL 10
1234 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1235                                      struct ixgbevf_ring *ring)
1236 {
1237         struct ixgbe_hw *hw = &adapter->hw;
1238         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1239         u32 rxdctl;
1240         u8 reg_idx = ring->reg_idx;
1241
1242         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1243         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1244
1245         /* write value back with RXDCTL.ENABLE bit cleared */
1246         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1247
1248         /* the hardware may take up to 100us to really disable the rx queue */
1249         do {
1250                 udelay(10);
1251                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1252         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1253
1254         if (!wait_loop)
1255                 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1256                        reg_idx);
1257 }
1258
1259 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1260                                          struct ixgbevf_ring *ring)
1261 {
1262         struct ixgbe_hw *hw = &adapter->hw;
1263         int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1264         u32 rxdctl;
1265         u8 reg_idx = ring->reg_idx;
1266
1267         do {
1268                 usleep_range(1000, 2000);
1269                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1270         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1271
1272         if (!wait_loop)
1273                 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1274                        reg_idx);
1275 }
1276
1277 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1278                                       struct ixgbevf_ring *ring)
1279 {
1280         struct ixgbe_hw *hw = &adapter->hw;
1281         u64 rdba = ring->dma;
1282         u32 rxdctl;
1283         u8 reg_idx = ring->reg_idx;
1284
1285         /* disable queue to avoid issues while updating state */
1286         rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1287         ixgbevf_disable_rx_queue(adapter, ring);
1288
1289         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1290         IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1291         IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1292                         ring->count * sizeof(union ixgbe_adv_rx_desc));
1293
1294         /* enable relaxed ordering */
1295         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1296                         IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1297
1298         /* reset head and tail pointers */
1299         IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1300         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1301         ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
1302
1303         /* reset ntu and ntc to place SW in sync with hardware */
1304         ring->next_to_clean = 0;
1305         ring->next_to_use = 0;
1306
1307         ixgbevf_configure_srrctl(adapter, reg_idx);
1308
1309         /* prevent DMA from exceeding buffer space available */
1310         rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1311         rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
1312         rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1313         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1314
1315         ixgbevf_rx_desc_queue_enable(adapter, ring);
1316         ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
1317 }
1318
1319 /**
1320  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1321  * @adapter: board private structure
1322  *
1323  * Configure the Rx unit of the MAC after a reset.
1324  **/
1325 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1326 {
1327         int i;
1328
1329         ixgbevf_setup_psrtype(adapter);
1330
1331         /* set_rx_buffer_len must be called before ring initialization */
1332         ixgbevf_set_rx_buffer_len(adapter);
1333
1334         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1335          * the Base and Length of the Rx Descriptor Ring */
1336         for (i = 0; i < adapter->num_rx_queues; i++)
1337                 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1338 }
1339
1340 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1341                                    __be16 proto, u16 vid)
1342 {
1343         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1344         struct ixgbe_hw *hw = &adapter->hw;
1345         int err;
1346
1347         spin_lock_bh(&adapter->mbx_lock);
1348
1349         /* add VID to filter table */
1350         err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1351
1352         spin_unlock_bh(&adapter->mbx_lock);
1353
1354         /* translate error return types so error makes sense */
1355         if (err == IXGBE_ERR_MBX)
1356                 return -EIO;
1357
1358         if (err == IXGBE_ERR_INVALID_ARGUMENT)
1359                 return -EACCES;
1360
1361         set_bit(vid, adapter->active_vlans);
1362
1363         return err;
1364 }
1365
1366 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1367                                     __be16 proto, u16 vid)
1368 {
1369         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1370         struct ixgbe_hw *hw = &adapter->hw;
1371         int err = -EOPNOTSUPP;
1372
1373         spin_lock_bh(&adapter->mbx_lock);
1374
1375         /* remove VID from filter table */
1376         err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1377
1378         spin_unlock_bh(&adapter->mbx_lock);
1379
1380         clear_bit(vid, adapter->active_vlans);
1381
1382         return err;
1383 }
1384
1385 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1386 {
1387         u16 vid;
1388
1389         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1390                 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1391                                         htons(ETH_P_8021Q), vid);
1392 }
1393
1394 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1395 {
1396         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1397         struct ixgbe_hw *hw = &adapter->hw;
1398         int count = 0;
1399
1400         if ((netdev_uc_count(netdev)) > 10) {
1401                 pr_err("Too many unicast filters - No Space\n");
1402                 return -ENOSPC;
1403         }
1404
1405         if (!netdev_uc_empty(netdev)) {
1406                 struct netdev_hw_addr *ha;
1407                 netdev_for_each_uc_addr(ha, netdev) {
1408                         hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1409                         udelay(200);
1410                 }
1411         } else {
1412                 /*
1413                  * If the list is empty then send message to PF driver to
1414                  * clear all macvlans on this VF.
1415                  */
1416                 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1417         }
1418
1419         return count;
1420 }
1421
1422 /**
1423  * ixgbevf_set_rx_mode - Multicast and unicast set
1424  * @netdev: network interface device structure
1425  *
1426  * The set_rx_method entry point is called whenever the multicast address
1427  * list, unicast address list or the network interface flags are updated.
1428  * This routine is responsible for configuring the hardware for proper
1429  * multicast mode and configuring requested unicast filters.
1430  **/
1431 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1432 {
1433         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1434         struct ixgbe_hw *hw = &adapter->hw;
1435
1436         spin_lock_bh(&adapter->mbx_lock);
1437
1438         /* reprogram multicast list */
1439         hw->mac.ops.update_mc_addr_list(hw, netdev);
1440
1441         ixgbevf_write_uc_addr_list(netdev);
1442
1443         spin_unlock_bh(&adapter->mbx_lock);
1444 }
1445
1446 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1447 {
1448         int q_idx;
1449         struct ixgbevf_q_vector *q_vector;
1450         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1451
1452         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1453                 q_vector = adapter->q_vector[q_idx];
1454 #ifdef CONFIG_NET_RX_BUSY_POLL
1455                 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1456 #endif
1457                 napi_enable(&q_vector->napi);
1458         }
1459 }
1460
1461 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1462 {
1463         int q_idx;
1464         struct ixgbevf_q_vector *q_vector;
1465         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1466
1467         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1468                 q_vector = adapter->q_vector[q_idx];
1469                 napi_disable(&q_vector->napi);
1470 #ifdef CONFIG_NET_RX_BUSY_POLL
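                     /* ixgbevf_qv_disable() fails while a busy-poll user
                      * still owns this vector's lock, so keep retrying
                      * until the socket side lets go
                      */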
1471                 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1472                         pr_info("QV %d locked\n", q_idx);
1473                         usleep_range(1000, 20000);
1474                 }
1475 #endif /* CONFIG_NET_RX_BUSY_POLL */
1476         }
1477 }
1478
1479 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1480 {
1481         struct ixgbe_hw *hw = &adapter->hw;
1482         unsigned int def_q = 0;
1483         unsigned int num_tcs = 0;
1484         unsigned int num_rx_queues = 1;
1485         int err;
1486
1487         spin_lock_bh(&adapter->mbx_lock);
1488
1489         /* fetch queue configuration from the PF */
1490         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1491
1492         spin_unlock_bh(&adapter->mbx_lock);
1493
1494         if (err)
1495                 return err;
1496
1497         if (num_tcs > 1) {
1498                 /* update default Tx ring register index */
1499                 adapter->tx_ring[0]->reg_idx = def_q;
1500
1501                 /* we need as many queues as traffic classes */
1502                 num_rx_queues = num_tcs;
1503         }
1504
1505         /* if the queue config no longer matches, request a queue reset */
1506         if (adapter->num_rx_queues != num_rx_queues) {
1507                 /* force mailbox timeout to prevent further messages */
1508                 hw->mbx.timeout = 0;
1509
1510                 /* wait for watchdog to come around and bail us out */
1511                 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
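                     /* the watchdog task then runs
                      * ixgbevf_queue_reset_subtask(), which acts on this
                      * flag and rebuilds the interrupt scheme and rings
                      * with the queue count the PF reported
                      */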
1512         }
1513
1514         return 0;
1515 }
1516
1517 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1518 {
1519         ixgbevf_configure_dcb(adapter);
1520
1521         ixgbevf_set_rx_mode(adapter->netdev);
1522
1523         ixgbevf_restore_vlan(adapter);
1524
1525         ixgbevf_configure_tx(adapter);
1526         ixgbevf_configure_rx(adapter);
1527 }
1528
1529 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1530 {
1531         /* Only save pre-reset stats if there are some */
1532         if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1533                 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1534                         adapter->stats.base_vfgprc;
1535                 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1536                         adapter->stats.base_vfgptc;
1537                 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1538                         adapter->stats.base_vfgorc;
1539                 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1540                         adapter->stats.base_vfgotc;
1541                 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1542                         adapter->stats.base_vfmprc;
1543         }
1544 }
1545
1546 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1547 {
1548         struct ixgbe_hw *hw = &adapter->hw;
1549
1550         adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1551         adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1552         adapter->stats.last_vfgorc |=
1553                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1554         adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1555         adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1556         adapter->stats.last_vfgotc |=
1557                 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1558         adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1559
1560         adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1561         adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1562         adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1563         adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1564         adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1565 }
1566
1567 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1568 {
1569         struct ixgbe_hw *hw = &adapter->hw;
1570         int api[] = { ixgbe_mbox_api_11,
1571                       ixgbe_mbox_api_10,
1572                       ixgbe_mbox_api_unknown };
1573         int err = 0, idx = 0;
1574
1575         spin_lock_bh(&adapter->mbx_lock);
1576
1577         while (api[idx] != ixgbe_mbox_api_unknown) {
1578                 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1579                 if (!err)
1580                         break;
1581                 idx++;
1582         }
1583
1584         spin_unlock_bh(&adapter->mbx_lock);
1585 }
1586
1587 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1588 {
1589         struct net_device *netdev = adapter->netdev;
1590         struct ixgbe_hw *hw = &adapter->hw;
1591
1592         ixgbevf_configure_msix(adapter);
1593
1594         spin_lock_bh(&adapter->mbx_lock);
1595
1596         if (is_valid_ether_addr(hw->mac.addr))
1597                 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1598         else
1599                 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1600
1601         spin_unlock_bh(&adapter->mbx_lock);
1602
1603         clear_bit(__IXGBEVF_DOWN, &adapter->state);
1604         ixgbevf_napi_enable_all(adapter);
1605
1606         /* enable transmits */
1607         netif_tx_start_all_queues(netdev);
1608
1609         ixgbevf_save_reset_stats(adapter);
1610         ixgbevf_init_last_counter_stats(adapter);
1611
1612         hw->mac.get_link_status = 1;
1613         mod_timer(&adapter->watchdog_timer, jiffies);
1614 }
1615
1616 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1617 {
1618         struct ixgbe_hw *hw = &adapter->hw;
1619
1620         ixgbevf_configure(adapter);
1621
1622         ixgbevf_up_complete(adapter);
1623
1624         /* clear any pending interrupts, may auto mask */
1625         IXGBE_READ_REG(hw, IXGBE_VTEICR);
1626
1627         ixgbevf_irq_enable(adapter);
1628 }
1629
1630 /**
1631  * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1632  * @rx_ring: ring to free buffers from
1633  **/
1634 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
1635 {
1636         unsigned long size;
1637         unsigned int i;
1638
1639         if (!rx_ring->rx_buffer_info)
1640                 return;
1641
1642         /* Free all the Rx ring sk_buffs */
1643         for (i = 0; i < rx_ring->count; i++) {
1644                 struct ixgbevf_rx_buffer *rx_buffer_info;
1645
1646                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1647                 if (rx_buffer_info->dma) {
1648                         dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
1649                                          rx_ring->rx_buf_len,
1650                                          DMA_FROM_DEVICE);
1651                         rx_buffer_info->dma = 0;
1652                 }
1653                 if (rx_buffer_info->skb) {
1654                         struct sk_buff *skb = rx_buffer_info->skb;
1655                         rx_buffer_info->skb = NULL;
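                             /* a frame that spanned several buffers is
                              * linked through IXGBE_CB(skb)->prev, so
                              * free the whole chain, not just the head
                              */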
1656                         do {
1657                                 struct sk_buff *this = skb;
1658                                 skb = IXGBE_CB(skb)->prev;
1659                                 dev_kfree_skb(this);
1660                         } while (skb);
1661                 }
1662         }
1663
1664         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1665         memset(rx_ring->rx_buffer_info, 0, size);
1666
1667         /* Zero out the descriptor ring */
1668         memset(rx_ring->desc, 0, rx_ring->size);
1669 }
1670
1671 /**
1672  * ixgbevf_clean_tx_ring - Free Tx Buffers
1673  * @tx_ring: ring to be cleaned
1674  **/
1675 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1676 {
1677         struct ixgbevf_tx_buffer *tx_buffer_info;
1678         unsigned long size;
1679         unsigned int i;
1680
1681         if (!tx_ring->tx_buffer_info)
1682                 return;
1683
1684         /* Free all the Tx ring sk_buffs */
1685         for (i = 0; i < tx_ring->count; i++) {
1686                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1687                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1688         }
1689
1690         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1691         memset(tx_ring->tx_buffer_info, 0, size);
1692
1693         memset(tx_ring->desc, 0, tx_ring->size);
1694 }
1695
1696 /**
1697  * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1698  * @adapter: board private structure
1699  **/
1700 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1701 {
1702         int i;
1703
1704         for (i = 0; i < adapter->num_rx_queues; i++)
1705                 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
1706 }
1707
1708 /**
1709  * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1710  * @adapter: board private structure
1711  **/
1712 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1713 {
1714         int i;
1715
1716         for (i = 0; i < adapter->num_tx_queues; i++)
1717                 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
1718 }
1719
1720 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1721 {
1722         struct net_device *netdev = adapter->netdev;
1723         struct ixgbe_hw *hw = &adapter->hw;
1724         int i;
1725
1726         /* signal that we are down to the interrupt handler */
1727         set_bit(__IXGBEVF_DOWN, &adapter->state);
1728
1729         /* disable all enabled rx queues */
1730         for (i = 0; i < adapter->num_rx_queues; i++)
1731                 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
1732
1733         netif_tx_disable(netdev);
1734
1735         msleep(10);
1736
1737         netif_tx_stop_all_queues(netdev);
1738
1739         ixgbevf_irq_disable(adapter);
1740
1741         ixgbevf_napi_disable_all(adapter);
1742
1743         del_timer_sync(&adapter->watchdog_timer);
1744         /* can't call flush scheduled work here because it can deadlock
1745          * if linkwatch_event tries to acquire the rtnl_lock which we are
1746          * holding */
1747         while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1748                 msleep(1);
1749
1750         /* disable transmits in the hardware now that interrupts are off */
1751         for (i = 0; i < adapter->num_tx_queues; i++) {
1752                 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
1753
1754                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
1755                                 IXGBE_TXDCTL_SWFLSH);
1756         }
1757
1758         netif_carrier_off(netdev);
1759
1760         if (!pci_channel_offline(adapter->pdev))
1761                 ixgbevf_reset(adapter);
1762
1763         ixgbevf_clean_all_tx_rings(adapter);
1764         ixgbevf_clean_all_rx_rings(adapter);
1765 }
1766
1767 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1768 {
1769         WARN_ON(in_interrupt());
1770
1771         while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1772                 msleep(1);
1773
1774         ixgbevf_down(adapter);
1775         ixgbevf_up(adapter);
1776
1777         clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1778 }
1779
1780 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1781 {
1782         struct ixgbe_hw *hw = &adapter->hw;
1783         struct net_device *netdev = adapter->netdev;
1784
1785         if (hw->mac.ops.reset_hw(hw)) {
1786                 hw_dbg(hw, "PF still resetting\n");
1787         } else {
1788                 hw->mac.ops.init_hw(hw);
1789                 ixgbevf_negotiate_api(adapter);
1790         }
1791
1792         if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1793                 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1794                        netdev->addr_len);
1795                 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1796                        netdev->addr_len);
1797         }
1798 }
1799
1800 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1801                                         int vectors)
1802 {
1803         int err = 0;
1804         int vector_threshold;
1805
1806         /* We'll want at least 2 (vector_threshold):
1807          * 1) TxQ[0] + RxQ[0] handler
1808          * 2) Other (Link Status Change, etc.)
1809          */
1810         vector_threshold = MIN_MSIX_COUNT;
1811
1812         /* The more we get, the more we will assign to Tx/Rx Cleanup
1813          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1814          * Right now, we simply care about how many we'll get; we'll
1815          * set them up later while requesting IRQs.
1816          */
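             /* pci_enable_msix() returns 0 on success, a negative errno on
              * hard failure, or the number of vectors actually available;
              * e.g. asking for 3 when only 2 can be granted returns 2, and
              * the loop below then retries with that smaller count.
              */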
1817         while (vectors >= vector_threshold) {
1818                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1819                                       vectors);
1820                 if (!err || err < 0) /* Success or a nasty failure. */
1821                         break;
1822                 else /* err == number of vectors we should try again with */
1823                         vectors = err;
1824         }
1825
1826         if (vectors < vector_threshold)
1827                 err = -ENOMEM;
1828
1829         if (err) {
1830                 dev_err(&adapter->pdev->dev,
1831                         "Unable to allocate MSI-X interrupts\n");
1832                 kfree(adapter->msix_entries);
1833                 adapter->msix_entries = NULL;
1834         } else {
1835                 /*
1836                  * Adjust for only the vectors we'll use, which is minimum
1837                  * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1838                  * vectors we were allocated.
1839                  */
1840                 adapter->num_msix_vectors = vectors;
1841         }
1842
1843         return err;
1844 }
1845
1846 /**
1847  * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1848  * @adapter: board private structure to initialize
1849  *
1850  * This is the top level queue allocation routine.  The order here is very
1851  * important: it starts with the largest set of features that can be turned
1852  * on at once and ends with the smallest set.  This way large combinations
1853  * can be allocated if they're turned on, and smaller combinations are the
1854  * fallthrough conditions.
1855  *
1856  **/
1857 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1858 {
1859         struct ixgbe_hw *hw = &adapter->hw;
1860         unsigned int def_q = 0;
1861         unsigned int num_tcs = 0;
1862         int err;
1863
1864         /* Start with base case */
1865         adapter->num_rx_queues = 1;
1866         adapter->num_tx_queues = 1;
1867
1868         spin_lock_bh(&adapter->mbx_lock);
1869
1870         /* fetch queue configuration from the PF */
1871         err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1872
1873         spin_unlock_bh(&adapter->mbx_lock);
1874
1875         if (err)
1876                 return;
1877
1878         /* we need as many queues as traffic classes */
1879         if (num_tcs > 1)
1880                 adapter->num_rx_queues = num_tcs;
1881 }
1882
1883 /**
1884  * ixgbevf_alloc_queues - Allocate memory for all rings
1885  * @adapter: board private structure to initialize
1886  *
1887  * We allocate one ring per queue at run-time since we don't know the
1888  * number of queues at compile-time.  This works for the multiqueue case
1889  * and just as well for a single queue.
1890  **/
1891 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1892 {
1893         struct ixgbevf_ring *ring;
1894         int rx = 0, tx = 0;
1895
1896         for (; tx < adapter->num_tx_queues; tx++) {
1897                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1898                 if (!ring)
1899                         goto err_allocation;
1900
1901                 ring->dev = &adapter->pdev->dev;
1902                 ring->netdev = adapter->netdev;
1903                 ring->count = adapter->tx_ring_count;
1904                 ring->queue_index = tx;
1905                 ring->reg_idx = tx;
1906
1907                 adapter->tx_ring[tx] = ring;
1908         }
1909
1910         for (; rx < adapter->num_rx_queues; rx++) {
1911                 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1912                 if (!ring)
1913                         goto err_allocation;
1914
1915                 ring->dev = &adapter->pdev->dev;
1916                 ring->netdev = adapter->netdev;
1917
1918                 ring->count = adapter->rx_ring_count;
1919                 ring->queue_index = rx;
1920                 ring->reg_idx = rx;
1921
1922                 adapter->rx_ring[rx] = ring;
1923         }
1924
1925         return 0;
1926
1927 err_allocation:
1928         while (tx) {
1929                 kfree(adapter->tx_ring[--tx]);
1930                 adapter->tx_ring[tx] = NULL;
1931         }
1932
1933         while (rx) {
1934                 kfree(adapter->rx_ring[--rx]);
1935                 adapter->rx_ring[rx] = NULL;
1936         }
1937         return -ENOMEM;
1938 }
1939
1940 /**
1941  * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1942  * @adapter: board private structure to initialize
1943  *
1944  * Attempt to configure the interrupts using the best available
1945  * capabilities of the hardware and the kernel.
1946  **/
1947 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1948 {
1949         struct net_device *netdev = adapter->netdev;
1950         int err = 0;
1951         int vector, v_budget;
1952
1953         /*
1954          * It's easy to be greedy for MSI-X vectors, but it really
1955          * doesn't do us much good if we have a lot more vectors
1956          * than CPUs.  So let's be conservative and only ask for
1957          * (roughly) the same number of vectors as there are CPUs.
1958          * The default is to use pairs of vectors.
1959          */
1960         v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1961         v_budget = min_t(int, v_budget, num_online_cpus());
1962         v_budget += NON_Q_VECTORS;
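             /* e.g. with one Rx and one Tx queue this works out to
              * max(1, 1) = 1 queue vector (Tx/Rx pairs share a vector),
              * capped by the CPU count, plus NON_Q_VECTORS for the
              * mailbox/other-cause interrupt.
              */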
1963
1964         /* A failure in MSI-X entry allocation isn't fatal, but it does
1965          * mean we disable MSI-X capabilities of the adapter. */
1966         adapter->msix_entries = kcalloc(v_budget,
1967                                         sizeof(struct msix_entry), GFP_KERNEL);
1968         if (!adapter->msix_entries) {
1969                 err = -ENOMEM;
1970                 goto out;
1971         }
1972
1973         for (vector = 0; vector < v_budget; vector++)
1974                 adapter->msix_entries[vector].entry = vector;
1975
1976         err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1977         if (err)
1978                 goto out;
1979
1980         err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1981         if (err)
1982                 goto out;
1983
1984         err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1985
1986 out:
1987         return err;
1988 }
1989
1990 /**
1991  * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1992  * @adapter: board private structure to initialize
1993  *
1994  * We allocate one q_vector per queue interrupt.  If allocation fails we
1995  * return -ENOMEM.
1996  **/
1997 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1998 {
1999         int q_idx, num_q_vectors;
2000         struct ixgbevf_q_vector *q_vector;
2001
2002         num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2003
2004         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2005                 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2006                 if (!q_vector)
2007                         goto err_out;
2008                 q_vector->adapter = adapter;
2009                 q_vector->v_idx = q_idx;
2010                 netif_napi_add(adapter->netdev, &q_vector->napi,
2011                                ixgbevf_poll, 64);
2012 #ifdef CONFIG_NET_RX_BUSY_POLL
2013                 napi_hash_add(&q_vector->napi);
2014 #endif
2015                 adapter->q_vector[q_idx] = q_vector;
2016         }
2017
2018         return 0;
2019
2020 err_out:
2021         while (q_idx) {
2022                 q_idx--;
2023                 q_vector = adapter->q_vector[q_idx];
2024 #ifdef CONFIG_NET_RX_BUSY_POLL
2025                 napi_hash_del(&q_vector->napi);
2026 #endif
2027                 netif_napi_del(&q_vector->napi);
2028                 kfree(q_vector);
2029                 adapter->q_vector[q_idx] = NULL;
2030         }
2031         return -ENOMEM;
2032 }
2033
2034 /**
2035  * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2036  * @adapter: board private structure to initialize
2037  *
2038  * This function frees the memory allocated to the q_vectors.  In addition if
2039  * NAPI is enabled it will delete any references to the NAPI struct prior
2040  * to freeing the q_vector.
2041  **/
2042 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2043 {
2044         int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2045
2046         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2047                 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2048
2049                 adapter->q_vector[q_idx] = NULL;
2050 #ifdef CONFIG_NET_RX_BUSY_POLL
2051                 napi_hash_del(&q_vector->napi);
2052 #endif
2053                 netif_napi_del(&q_vector->napi);
2054                 kfree(q_vector);
2055         }
2056 }
2057
2058 /**
2059  * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2060  * @adapter: board private structure
2061  *
2062  **/
2063 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2064 {
2065         pci_disable_msix(adapter->pdev);
2066         kfree(adapter->msix_entries);
2067         adapter->msix_entries = NULL;
2068 }
2069
2070 /**
2071  * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2072  * @adapter: board private structure to initialize
2073  *
2074  **/
2075 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2076 {
2077         int err;
2078
2079         /* Number of supported queues */
2080         ixgbevf_set_num_queues(adapter);
2081
2082         err = ixgbevf_set_interrupt_capability(adapter);
2083         if (err) {
2084                 hw_dbg(&adapter->hw,
2085                        "Unable to setup interrupt capabilities\n");
2086                 goto err_set_interrupt;
2087         }
2088
2089         err = ixgbevf_alloc_q_vectors(adapter);
2090         if (err) {
2091                 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2092                        "vectors\n");
2093                 goto err_alloc_q_vectors;
2094         }
2095
2096         err = ixgbevf_alloc_queues(adapter);
2097         if (err) {
2098                 pr_err("Unable to allocate memory for queues\n");
2099                 goto err_alloc_queues;
2100         }
2101
2102         hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2103                "Tx Queue count = %u\n",
2104                (adapter->num_rx_queues > 1) ? "Enabled" :
2105                "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2106
2107         set_bit(__IXGBEVF_DOWN, &adapter->state);
2108
2109         return 0;
2110 err_alloc_queues:
2111         ixgbevf_free_q_vectors(adapter);
2112 err_alloc_q_vectors:
2113         ixgbevf_reset_interrupt_capability(adapter);
2114 err_set_interrupt:
2115         return err;
2116 }
2117
2118 /**
2119  * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2120  * @adapter: board private structure to clear interrupt scheme on
2121  *
2122  * We go through and clear interrupt specific resources and reset the structure
2123  * to pre-load conditions
2124  **/
2125 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2126 {
2127         int i;
2128
2129         for (i = 0; i < adapter->num_tx_queues; i++) {
2130                 kfree(adapter->tx_ring[i]);
2131                 adapter->tx_ring[i] = NULL;
2132         }
2133         for (i = 0; i < adapter->num_rx_queues; i++) {
2134                 kfree(adapter->rx_ring[i]);
2135                 adapter->rx_ring[i] = NULL;
2136         }
2137
2138         adapter->num_tx_queues = 0;
2139         adapter->num_rx_queues = 0;
2140
2141         ixgbevf_free_q_vectors(adapter);
2142         ixgbevf_reset_interrupt_capability(adapter);
2143 }
2144
2145 /**
2146  * ixgbevf_sw_init - Initialize general software structures
2147  * (struct ixgbevf_adapter)
2148  * @adapter: board private structure to initialize
2149  *
2150  * ixgbevf_sw_init initializes the Adapter private data structure.
2151  * Fields are initialized based on PCI device information and
2152  * OS network device settings (MTU size).
2153  **/
2154 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2155 {
2156         struct ixgbe_hw *hw = &adapter->hw;
2157         struct pci_dev *pdev = adapter->pdev;
2158         struct net_device *netdev = adapter->netdev;
2159         int err;
2160
2161         /* PCI config space info */
2162
2163         hw->vendor_id = pdev->vendor;
2164         hw->device_id = pdev->device;
2165         hw->revision_id = pdev->revision;
2166         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2167         hw->subsystem_device_id = pdev->subsystem_device;
2168
2169         hw->mbx.ops.init_params(hw);
2170
2171         /* assume legacy case in which PF would only give VF 2 queues */
2172         hw->mac.max_tx_queues = 2;
2173         hw->mac.max_rx_queues = 2;
2174
2175         /* lock to protect mailbox accesses */
2176         spin_lock_init(&adapter->mbx_lock);
2177
2178         err = hw->mac.ops.reset_hw(hw);
2179         if (err) {
2180                 dev_info(&pdev->dev,
2181                          "PF still in reset state.  Is the PF interface up?\n");
2182         } else {
2183                 err = hw->mac.ops.init_hw(hw);
2184                 if (err) {
2185                         pr_err("init_hw failed: %d\n", err);
2186                         goto out;
2187                 }
2188                 ixgbevf_negotiate_api(adapter);
2189                 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2190                 if (err)
2191                         dev_info(&pdev->dev, "Error reading MAC address\n");
2192                 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2193                         dev_info(&pdev->dev,
2194                                  "MAC address not assigned by administrator.\n");
2195                 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2196         }
2197
2198         if (!is_valid_ether_addr(netdev->dev_addr)) {
2199                 dev_info(&pdev->dev, "Assigning random MAC address\n");
2200                 eth_hw_addr_random(netdev);
2201                 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2202         }
2203
2204         /* Enable dynamic interrupt throttling rates */
2205         adapter->rx_itr_setting = 1;
2206         adapter->tx_itr_setting = 1;
2207
2208         /* set default ring sizes */
2209         adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2210         adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2211
2212         set_bit(__IXGBEVF_DOWN, &adapter->state);
2213         return 0;
2214
2215 out:
2216         return err;
2217 }
2218
2219 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)     \
2220         {                                                       \
2221                 u32 current_counter = IXGBE_READ_REG(hw, reg);  \
2222                 if (current_counter < last_counter)             \
2223                         counter += 0x100000000LL;               \
2224                 last_counter = current_counter;                 \
2225                 counter &= 0xFFFFFFFF00000000LL;                \
2226                 counter |= current_counter;                     \
2227         }
2228
2229 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2230         {                                                                \
2231                 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
2232                 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
2233                 u64 current_counter = (current_counter_msb << 32) |      \
2234                         current_counter_lsb;                             \
2235                 if (current_counter < last_counter)                      \
2236                         counter += 0x1000000000LL;                       \
2237                 last_counter = current_counter;                          \
2238                 counter &= 0xFFFFFFF000000000LL;                         \
2239                 counter |= current_counter;                              \
2240         }
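     /* Wrap handling example for the 32-bit case above: if the last sample
      * was 0xfffffff0 and the register now reads 0x00000010, the hardware
      * counter wrapped, so 2^32 is added to the running 64-bit total before
      * its low 32 bits are replaced with the new reading.  The 36-bit
      * variant does the same with a 2^36 correction.
      */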
2241 /**
2242  * ixgbevf_update_stats - Update the board statistics counters.
2243  * @adapter: board private structure
2244  **/
2245 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2246 {
2247         struct ixgbe_hw *hw = &adapter->hw;
2248         int i;
2249
2250         if (!adapter->link_up)
2251                 return;
2252
2253         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2254                                 adapter->stats.vfgprc);
2255         UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2256                                 adapter->stats.vfgptc);
2257         UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2258                                 adapter->stats.last_vfgorc,
2259                                 adapter->stats.vfgorc);
2260         UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2261                                 adapter->stats.last_vfgotc,
2262                                 adapter->stats.vfgotc);
2263         UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2264                                 adapter->stats.vfmprc);
2265
2266         for (i = 0;  i  < adapter->num_rx_queues;  i++) {
2267                 adapter->hw_csum_rx_error +=
2268                         adapter->rx_ring[i]->hw_csum_rx_error;
2269                 adapter->hw_csum_rx_good +=
2270                         adapter->rx_ring[i]->hw_csum_rx_good;
2271                 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2272                 adapter->rx_ring[i]->hw_csum_rx_good = 0;
2273         }
2274 }
2275
2276 /**
2277  * ixgbevf_watchdog - Timer Call-back
2278  * @data: pointer to adapter cast into an unsigned long
2279  **/
2280 static void ixgbevf_watchdog(unsigned long data)
2281 {
2282         struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2283         struct ixgbe_hw *hw = &adapter->hw;
2284         u32 eics = 0;
2285         int i;
2286
2287         /*
2288          * Do the watchdog outside of interrupt context due to the lovely
2289          * delays that some of the newer hardware requires
2290          */
2291
2292         if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2293                 goto watchdog_short_circuit;
2294
2295         /* get one bit for every active tx/rx interrupt vector */
2296         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2297                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2298                 if (qv->rx.ring || qv->tx.ring)
2299                         eics |= 1 << i;
2300         }
2301
2302         IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2303
2304 watchdog_short_circuit:
2305         schedule_work(&adapter->watchdog_task);
2306 }
2307
2308 /**
2309  * ixgbevf_tx_timeout - Respond to a Tx Hang
2310  * @netdev: network interface device structure
2311  **/
2312 static void ixgbevf_tx_timeout(struct net_device *netdev)
2313 {
2314         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2315
2316         /* Do the reset outside of interrupt context */
2317         schedule_work(&adapter->reset_task);
2318 }
2319
2320 static void ixgbevf_reset_task(struct work_struct *work)
2321 {
2322         struct ixgbevf_adapter *adapter;
2323         adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2324
2325         /* If we're already down or resetting, just bail */
2326         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2327             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2328                 return;
2329
2330         adapter->tx_timeout_count++;
2331
2332         ixgbevf_reinit_locked(adapter);
2333 }
2334
2335 /**
2336  * ixgbevf_watchdog_task - worker thread to bring link up
2337  * @work: pointer to work_struct containing our data
2338  **/
2339 static void ixgbevf_watchdog_task(struct work_struct *work)
2340 {
2341         struct ixgbevf_adapter *adapter = container_of(work,
2342                                                        struct ixgbevf_adapter,
2343                                                        watchdog_task);
2344         struct net_device *netdev = adapter->netdev;
2345         struct ixgbe_hw *hw = &adapter->hw;
2346         u32 link_speed = adapter->link_speed;
2347         bool link_up = adapter->link_up;
2348         s32 need_reset;
2349
2350         ixgbevf_queue_reset_subtask(adapter);
2351
2352         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2353
2354         /*
2355          * Always check the link on the watchdog because we have
2356          * no LSC interrupt
2357          */
2358         spin_lock_bh(&adapter->mbx_lock);
2359
2360         need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2361
2362         spin_unlock_bh(&adapter->mbx_lock);
2363
2364         if (need_reset) {
2365                 adapter->link_up = link_up;
2366                 adapter->link_speed = link_speed;
2367                 netif_carrier_off(netdev);
2368                 netif_tx_stop_all_queues(netdev);
2369                 schedule_work(&adapter->reset_task);
2370                 goto pf_has_reset;
2371         }
2372         adapter->link_up = link_up;
2373         adapter->link_speed = link_speed;
2374
2375         if (link_up) {
2376                 if (!netif_carrier_ok(netdev)) {
2377                         char *link_speed_string;
2378                         switch (link_speed) {
2379                         case IXGBE_LINK_SPEED_10GB_FULL:
2380                                 link_speed_string = "10 Gbps";
2381                                 break;
2382                         case IXGBE_LINK_SPEED_1GB_FULL:
2383                                 link_speed_string = "1 Gbps";
2384                                 break;
2385                         case IXGBE_LINK_SPEED_100_FULL:
2386                                 link_speed_string = "100 Mbps";
2387                                 break;
2388                         default:
2389                                 link_speed_string = "unknown speed";
2390                                 break;
2391                         }
2392                         dev_info(&adapter->pdev->dev,
2393                                 "NIC Link is Up, %s\n", link_speed_string);
2394                         netif_carrier_on(netdev);
2395                         netif_tx_wake_all_queues(netdev);
2396                 }
2397         } else {
2398                 adapter->link_up = false;
2399                 adapter->link_speed = 0;
2400                 if (netif_carrier_ok(netdev)) {
2401                         dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2402                         netif_carrier_off(netdev);
2403                         netif_tx_stop_all_queues(netdev);
2404                 }
2405         }
2406
2407         ixgbevf_update_stats(adapter);
2408
2409 pf_has_reset:
2410         /* Reset the timer */
2411         if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2412                 mod_timer(&adapter->watchdog_timer,
2413                           round_jiffies(jiffies + (2 * HZ)));
2414
2415         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2416 }
2417
2418 /**
2419  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2420  * @tx_ring: Tx descriptor ring for a specific queue
2421  *
2422  * Free all transmit software resources
2423  **/
2424 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2425 {
2426         ixgbevf_clean_tx_ring(tx_ring);
2427
2428         vfree(tx_ring->tx_buffer_info);
2429         tx_ring->tx_buffer_info = NULL;
2430
2431         /* if not set, then don't free */
2432         if (!tx_ring->desc)
2433                 return;
2434
2435         dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2436                           tx_ring->dma);
2437
2438         tx_ring->desc = NULL;
2439 }
2440
2441 /**
2442  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2443  * @adapter: board private structure
2444  *
2445  * Free all transmit software resources
2446  **/
2447 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2448 {
2449         int i;
2450
2451         for (i = 0; i < adapter->num_tx_queues; i++)
2452                 if (adapter->tx_ring[i]->desc)
2453                         ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2454 }
2455
2456 /**
2457  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2458  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
2459  *
2460  * Return 0 on success, negative on failure
2461  **/
2462 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2463 {
2464         int size;
2465
2466         size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2467         tx_ring->tx_buffer_info = vzalloc(size);
2468         if (!tx_ring->tx_buffer_info)
2469                 goto err;
2470
2471         /* round up to nearest 4K */
2472         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2473         tx_ring->size = ALIGN(tx_ring->size, 4096);
2474
2475         tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2476                                            &tx_ring->dma, GFP_KERNEL);
2477         if (!tx_ring->desc)
2478                 goto err;
2479
2480         return 0;
2481
2482 err:
2483         vfree(tx_ring->tx_buffer_info);
2484         tx_ring->tx_buffer_info = NULL;
2485         dev_err(tx_ring->dev,
2486                 "Unable to allocate memory for the Tx descriptor ring\n");
2487         return -ENOMEM;
2488 }
2489
2490 /**
2491  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2492  * @adapter: board private structure
2493  *
2494  * If this function returns with an error, then it's possible one or
2495  * more of the rings is populated (while the rest are not).  It is the
2496  * caller's duty to clean up those orphaned rings.
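      * ixgbevf_open(), for example, handles this by falling through to
      * ixgbevf_free_all_tx_resources() in its error unwind path.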
2497  *
2498  * Return 0 on success, negative on failure
2499  **/
2500 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2501 {
2502         int i, err = 0;
2503
2504         for (i = 0; i < adapter->num_tx_queues; i++) {
2505                 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2506                 if (!err)
2507                         continue;
2508                 hw_dbg(&adapter->hw,
2509                        "Allocation for Tx Queue %u failed\n", i);
2510                 break;
2511         }
2512
2513         return err;
2514 }
2515
2516 /**
2517  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2518  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
2519  *
2520  * Returns 0 on success, negative on failure
2521  **/
2522 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2523 {
2524         int size;
2525
2526         size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2527         rx_ring->rx_buffer_info = vzalloc(size);
2528         if (!rx_ring->rx_buffer_info)
2529                 goto err;
2530
2531         /* Round up to nearest 4K */
2532         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2533         rx_ring->size = ALIGN(rx_ring->size, 4096);
2534
2535         rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2536                                            &rx_ring->dma, GFP_KERNEL);
2537
2538         if (!rx_ring->desc)
2539                 goto err;
2540
2541         return 0;
2542 err:
2543         vfree(rx_ring->rx_buffer_info);
2544         rx_ring->rx_buffer_info = NULL;
2545         dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2546         return -ENOMEM;
2547 }
2548
2549 /**
2550  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2551  * @adapter: board private structure
2552  *
2553  * If this function returns with an error, then it's possible one or
2554  * more of the rings is populated (while the rest are not).  It is the
2555  * caller's duty to clean up those orphaned rings.
2556  *
2557  * Return 0 on success, negative on failure
2558  **/
2559 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2560 {
2561         int i, err = 0;
2562
2563         for (i = 0; i < adapter->num_rx_queues; i++) {
2564                 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2565                 if (!err)
2566                         continue;
2567                 hw_dbg(&adapter->hw,
2568                        "Allocation for Rx Queue %u failed\n", i);
2569                 break;
2570         }
2571         return err;
2572 }
2573
2574 /**
2575  * ixgbevf_free_rx_resources - Free Rx Resources
2576  * @rx_ring: ring to clean the resources from
2577  *
2578  * Free all receive software resources
2579  **/
2580 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2581 {
2582         ixgbevf_clean_rx_ring(rx_ring);
2583
2584         vfree(rx_ring->rx_buffer_info);
2585         rx_ring->rx_buffer_info = NULL;
2586
2587         dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2588                           rx_ring->dma);
2589
2590         rx_ring->desc = NULL;
2591 }
2592
2593 /**
2594  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2595  * @adapter: board private structure
2596  *
2597  * Free all receive software resources
2598  **/
2599 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2600 {
2601         int i;
2602
2603         for (i = 0; i < adapter->num_rx_queues; i++)
2604                 if (adapter->rx_ring[i]->desc)
2605                         ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2606 }
2607
2608 /**
2609  * ixgbevf_open - Called when a network interface is made active
2610  * @netdev: network interface device structure
2611  *
2612  * Returns 0 on success, negative value on failure
2613  *
2614  * The open entry point is called when a network interface is made
2615  * active by the system (IFF_UP).  At this point all resources needed
2616  * for transmit and receive operations are allocated, the interrupt
2617  * handler is registered with the OS, the watchdog timer is started,
2618  * and the stack is notified that the interface is ready.
2619  **/
2620 static int ixgbevf_open(struct net_device *netdev)
2621 {
2622         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2623         struct ixgbe_hw *hw = &adapter->hw;
2624         int err;
2625
2626         /* A previous failure to open the device because of a lack of
2627          * available MSIX vector resources may have reset the number
2628          * of msix vectors variable to zero.  The only way to recover
2629          * is to unload/reload the driver and hope that the system has
2630          * been able to recover some MSIX vector resources.
2631          */
2632         if (!adapter->num_msix_vectors)
2633                 return -ENOMEM;
2634
2635         /* disallow open during test */
2636         if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2637                 return -EBUSY;
2638
2639         if (hw->adapter_stopped) {
2640                 ixgbevf_reset(adapter);
2641                 /* if adapter is still stopped then PF isn't up and
2642                  * the vf can't start. */
2643                 if (hw->adapter_stopped) {
2644                         err = IXGBE_ERR_MBX;
2645                         pr_err("Unable to start - perhaps the PF Driver isn't "
2646                                "up yet\n");
2647                         goto err_setup_reset;
2648                 }
2649         }
2650
2651         /* allocate transmit descriptors */
2652         err = ixgbevf_setup_all_tx_resources(adapter);
2653         if (err)
2654                 goto err_setup_tx;
2655
2656         /* allocate receive descriptors */
2657         err = ixgbevf_setup_all_rx_resources(adapter);
2658         if (err)
2659                 goto err_setup_rx;
2660
2661         ixgbevf_configure(adapter);
2662
2663         /*
2664          * Map the Tx/Rx rings to the vectors we were allotted.
2665          * Since request_irq() is called in this function, the rings
2666          * must be mapped to their vectors *before* up_complete.
2667          */
2668         ixgbevf_map_rings_to_vectors(adapter);
2669
2670         ixgbevf_up_complete(adapter);
2671
2672         /* clear any pending interrupts, may auto mask */
2673         IXGBE_READ_REG(hw, IXGBE_VTEICR);
2674         err = ixgbevf_request_irq(adapter);
2675         if (err)
2676                 goto err_req_irq;
2677
2678         ixgbevf_irq_enable(adapter);
2679
2680         return 0;
2681
2682 err_req_irq:
2683         ixgbevf_down(adapter);
2684 err_setup_rx:
2685         ixgbevf_free_all_rx_resources(adapter);
2686 err_setup_tx:
2687         ixgbevf_free_all_tx_resources(adapter);
2688         ixgbevf_reset(adapter);
2689
2690 err_setup_reset:
2691
2692         return err;
2693 }
2694
2695 /**
2696  * ixgbevf_close - Disables a network interface
2697  * @netdev: network interface device structure
2698  *
2699  * Returns 0, this is not allowed to fail
2700  *
2701  * The close entry point is called when an interface is de-activated
2702  * by the OS.  The hardware is still under the drivers control, but
2703  * needs to be disabled.  A global MAC reset is issued to stop the
2704  * hardware, and all transmit and receive resources are freed.
2705  **/
2706 static int ixgbevf_close(struct net_device *netdev)
2707 {
2708         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2709
2710         ixgbevf_down(adapter);
2711         ixgbevf_free_irq(adapter);
2712
2713         ixgbevf_free_all_tx_resources(adapter);
2714         ixgbevf_free_all_rx_resources(adapter);
2715
2716         return 0;
2717 }
2718
2719 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2720 {
2721         struct net_device *dev = adapter->netdev;
2722
2723         if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2724                 return;
2725
2726         adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2727
2728         /* if interface is down do nothing */
2729         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2730             test_bit(__IXGBEVF_RESETTING, &adapter->state))
2731                 return;
2732
2733         /* Hardware has to reinitialize queues and interrupts to
2734          * match packet buffer alignment. Unfortunately, the
2735          * hardware is not flexible enough to do this dynamically.
2736          */
2737         if (netif_running(dev))
2738                 ixgbevf_close(dev);
2739
2740         ixgbevf_clear_interrupt_scheme(adapter);
2741         ixgbevf_init_interrupt_scheme(adapter);
2742
2743         if (netif_running(dev))
2744                 ixgbevf_open(dev);
2745 }
2746
2747 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2748                                 u32 vlan_macip_lens, u32 type_tucmd,
2749                                 u32 mss_l4len_idx)
2750 {
2751         struct ixgbe_adv_tx_context_desc *context_desc;
2752         u16 i = tx_ring->next_to_use;
2753
2754         context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2755
2756         i++;
2757         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2758
2759         /* set bits to identify this as an advanced context descriptor */
2760         type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2761
2762         context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
2763         context_desc->seqnum_seed       = 0;
2764         context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
2765         context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
2766 }
2767
2768 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2769                        struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2770 {
2771         u32 vlan_macip_lens, type_tucmd;
2772         u32 mss_l4len_idx, l4len;
2773
2774         if (!skb_is_gso(skb))
2775                 return 0;
2776
2777         if (skb_header_cloned(skb)) {
2778                 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2779                 if (err)
2780                         return err;
2781         }
2782
2783         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2784         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2785
2786         if (skb->protocol == htons(ETH_P_IP)) {
2787                 struct iphdr *iph = ip_hdr(skb);
2788                 iph->tot_len = 0;
2789                 iph->check = 0;
2790                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2791                                                          iph->daddr, 0,
2792                                                          IPPROTO_TCP,
2793                                                          0);
2794                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2795         } else if (skb_is_gso_v6(skb)) {
2796                 ipv6_hdr(skb)->payload_len = 0;
2797                 tcp_hdr(skb)->check =
2798                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2799                                      &ipv6_hdr(skb)->daddr,
2800                                      0, IPPROTO_TCP, 0);
2801         }
2802
2803         /* compute header lengths */
2804         l4len = tcp_hdrlen(skb);
2805         *hdr_len = skb_transport_offset(skb) + l4len;
2807
2808         /* mss_l4len_id: use 1 as index for TSO */
2809         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2810         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2811         mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2812
2813         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2814         vlan_macip_lens = skb_network_header_len(skb);
2815         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2816         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
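             /* Worked example for an untagged TCP/IPv4 frame with a 14-byte
              * MAC header, 20-byte IP header and 20-byte TCP header:
              * *hdr_len ends up as 54, mss_l4len_idx carries 20 in its
              * L4LEN field and gso_size in its MSS field, and
              * vlan_macip_lens carries the 20-byte IP header length plus a
              * MACLEN of 14.
              */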
2817
2818         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2819                             type_tucmd, mss_l4len_idx);
2820
2821         return 1;
2822 }
2823
2824 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2825                             struct sk_buff *skb, u32 tx_flags)
2826 {
2827         u32 vlan_macip_lens = 0;
2828         u32 mss_l4len_idx = 0;
2829         u32 type_tucmd = 0;
2830
2831         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2832                 u8 l4_hdr = 0;
2833                 switch (skb->protocol) {
2834                 case __constant_htons(ETH_P_IP):
2835                         vlan_macip_lens |= skb_network_header_len(skb);
2836                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2837                         l4_hdr = ip_hdr(skb)->protocol;
2838                         break;
2839                 case __constant_htons(ETH_P_IPV6):
2840                         vlan_macip_lens |= skb_network_header_len(skb);
2841                         l4_hdr = ipv6_hdr(skb)->nexthdr;
2842                         break;
2843                 default:
2844                         if (unlikely(net_ratelimit())) {
2845                                 dev_warn(tx_ring->dev,
2846                                  "partial checksum but proto=%x!\n",
2847                                  skb->protocol);
2848                         }
2849                         break;
2850                 }
2851
2852                 switch (l4_hdr) {
2853                 case IPPROTO_TCP:
2854                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2855                         mss_l4len_idx = tcp_hdrlen(skb) <<
2856                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2857                         break;
2858                 case IPPROTO_SCTP:
2859                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2860                         mss_l4len_idx = sizeof(struct sctphdr) <<
2861                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2862                         break;
2863                 case IPPROTO_UDP:
2864                         mss_l4len_idx = sizeof(struct udphdr) <<
2865                                         IXGBE_ADVTXD_L4LEN_SHIFT;
2866                         break;
2867                 default:
2868                         if (unlikely(net_ratelimit())) {
2869                                 dev_warn(tx_ring->dev,
2870                                  "partial checksum but l4 proto=%x!\n",
2871                                  l4_hdr);
2872                         }
2873                         break;
2874                 }
2875         }
2876
2877         /* vlan_macip_lens: MACLEN, VLAN tag */
2878         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2879         vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2880
2881         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2882                             type_tucmd, mss_l4len_idx);
2883
2884         return (skb->ip_summed == CHECKSUM_PARTIAL);
2885 }
2886
2887 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2888                           struct sk_buff *skb, u32 tx_flags)
2889 {
2890         struct ixgbevf_tx_buffer *tx_buffer_info;
2891         unsigned int len;
2892         unsigned int total = skb->len;
2893         unsigned int offset = 0, size;
2894         int count = 0;
2895         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2896         unsigned int f;
2897         int i;
2898
2899         i = tx_ring->next_to_use;
2900
2901         len = min(skb_headlen(skb), total);
2902         while (len) {
2903                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2904                 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2905
2906                 tx_buffer_info->length = size;
2907                 tx_buffer_info->mapped_as_page = false;
2908                 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2909                                                      skb->data + offset,
2910                                                      size, DMA_TO_DEVICE);
2911                 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2912                         goto dma_error;
2913
2914                 len -= size;
2915                 total -= size;
2916                 offset += size;
2917                 count++;
2918                 i++;
2919                 if (i == tx_ring->count)
2920                         i = 0;
2921         }
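             /* Chunks are capped at IXGBE_MAX_DATA_PER_TXD bytes (16KB for
              * this hardware family), so buffers larger than that are
              * simply split across several descriptors, both here for the
              * linear area and in the fragment loop below.
              */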
2922
2923         for (f = 0; f < nr_frags; f++) {
2924                 const struct skb_frag_struct *frag;
2925
2926                 frag = &skb_shinfo(skb)->frags[f];
2927                 len = min((unsigned int)skb_frag_size(frag), total);
2928                 offset = 0;
2929
2930                 while (len) {
2931                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
2932                         size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2933
2934                         tx_buffer_info->length = size;
2935                         tx_buffer_info->dma =
2936                                 skb_frag_dma_map(tx_ring->dev, frag,
2937                                                  offset, size, DMA_TO_DEVICE);
2938                         if (dma_mapping_error(tx_ring->dev,
2939                                               tx_buffer_info->dma))
2940                                 goto dma_error;
2941                         tx_buffer_info->mapped_as_page = true;
2942
2943                         len -= size;
2944                         total -= size;
2945                         offset += size;
2946                         count++;
2947                         i++;
2948                         if (i == tx_ring->count)
2949                                 i = 0;
2950                 }
2951                 if (total == 0)
2952                         break;
2953         }
2954
2955         if (i == 0)
2956                 i = tx_ring->count - 1;
2957         else
2958                 i = i - 1;
2959         tx_ring->tx_buffer_info[i].skb = skb;
2960
2961         return count;
2962
2963 dma_error:
2964         dev_err(tx_ring->dev, "TX DMA map failed\n");
2965
2966         /* clear timestamp and dma mappings for failed tx_buffer_info map */
2967         tx_buffer_info->dma = 0;
2968         count--;
2969
2970         /* clear timestamp and dma mappings for remaining portion of packet */
2971         while (count >= 0) {
2972                 count--;
2973                 i--;
2974                 if (i < 0)
2975                         i += tx_ring->count;
2976                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2977                 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2978         }
2979
2980         return count;
2981 }
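
/*
 * Return convention of ixgbevf_tx_map() above (descriptive): on success
 * it returns the number of descriptors consumed and stashes the skb
 * pointer in the last tx_buffer_info entry so the cleanup path can free
 * it later.  If a DMA mapping fails, every mapping made so far is undone
 * and a negative count is returned, so the value must not be treated as
 * unsigned by callers.
 */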
2982
2983 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2984                              int count, unsigned int first, u32 paylen,
2985                              u8 hdr_len)
2986 {
2987         union ixgbe_adv_tx_desc *tx_desc = NULL;
2988         struct ixgbevf_tx_buffer *tx_buffer_info;
2989         u32 olinfo_status = 0, cmd_type_len = 0;
2990         unsigned int i;
2991
2992         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2993
2994         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2995
2996         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2997
2998         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2999                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3000
3001         if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3002                 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
3003
3004         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3005                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3006
3007                 /* use index 1 context for tso */
3008                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3009                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3010                         olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
3011         }
3012
3013         /*
3014          * Check Context must be set if Tx switch is enabled, which it
3015          * always is for the case where virtual functions are running
3016          */
3017         olinfo_status |= IXGBE_ADVTXD_CC;
3018
3019         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3020
3021         i = tx_ring->next_to_use;
3022         while (count--) {
3023                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3024                 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3025                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3026                 tx_desc->read.cmd_type_len =
3027                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3028                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3029                 i++;
3030                 if (i == tx_ring->count)
3031                         i = 0;
3032         }
3033
3034         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3035
3036         tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3037
3038         /* Force memory writes to complete before letting h/w
3039          * know there are new descriptors to fetch.  (Only
3040          * applicable for weak-ordered memory model archs,
3041          * such as IA-64).
3042          */
3043         wmb();
3044
3045         tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3046         tx_ring->next_to_use = i;
3047 }
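
/*
 * Descriptive note on ixgbevf_tx_queue() above: it converts the buffers
 * mapped by ixgbevf_tx_map() into advanced data descriptors, ORs EOP/RS
 * into the last one so the hardware reports completion for the whole
 * packet, and advances next_to_use behind a wmb().  Kicking the hardware
 * by writing the tail register is left to the caller (see
 * ixgbevf_xmit_frame() below).
 */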
3048
3049 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3050 {
3051         struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3052
3053         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3054         /* Herbert's original patch had:
3055          *  smp_mb__after_netif_stop_queue();
3056          * but since that doesn't exist yet, just open code it. */
3057         smp_mb();
3058
3059         /* We need to check again in case another CPU has just
3060          * made room available. */
3061         if (likely(ixgbevf_desc_unused(tx_ring) < size))
3062                 return -EBUSY;
3063
3064         /* A reprieve! - use start_queue because it doesn't call schedule */
3065         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3066         ++adapter->restart_queue;
3067         return 0;
3068 }
3069
3070 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3071 {
3072         if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3073                 return 0;
3074         return __ixgbevf_maybe_stop_tx(tx_ring, size);
3075 }
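
/*
 * Queue-stop pattern used above (illustrative): the fast path only checks
 * ixgbevf_desc_unused(); the slow path stops the subqueue, issues smp_mb()
 * and re-checks so that a Tx completion freeing descriptors on another CPU
 * cannot be missed.  The transmit path calls it exactly as in
 * ixgbevf_xmit_frame() below:
 *
 *	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
 *		adapter->tx_busy++;
 *		return NETDEV_TX_BUSY;
 *	}
 */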
3076
3077 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3078 {
3079         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3080         struct ixgbevf_ring *tx_ring;
3081         unsigned int first;
3082         unsigned int tx_flags = 0;
3083         u8 hdr_len = 0;
3084         int r_idx = 0, tso;
3085         u16 count = TXD_USE_COUNT(skb_headlen(skb));
3086 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3087         unsigned short f;
3088 #endif
3089         u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3090         if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3091                 dev_kfree_skb(skb);
3092                 return NETDEV_TX_OK;
3093         }
3094
3095         tx_ring = adapter->tx_ring[r_idx];
3096
3097         /*
3098          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3099          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3100          *       + 2 desc gap to keep tail from touching head,
3101          *       + 1 desc for context descriptor,
3102          * otherwise try next time
3103          */
3104 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3105         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3106                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3107 #else
3108         count += skb_shinfo(skb)->nr_frags;
3109 #endif
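        /*
         * Worked example of the budget above (illustrative; assumes
         * PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD, e.g. 4 KiB pages): a linear
         * header plus two page fragments needs count = 1 + 2 data
         * descriptors, so maybe_stop_tx() below is asked for count + 3 = 6
         * free slots -- the extra 3 covering one context descriptor and the
         * two-descriptor gap between tail and head.
         */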
3110         if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3111                 adapter->tx_busy++;
3112                 return NETDEV_TX_BUSY;
3113         }
3114
3115         if (vlan_tx_tag_present(skb)) {
3116                 tx_flags |= vlan_tx_tag_get(skb);
3117                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3118                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3119         }
3120
3121         first = tx_ring->next_to_use;
3122
3123         if (skb->protocol == htons(ETH_P_IP))
3124                 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3125         tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3126         if (tso < 0) {
3127                 dev_kfree_skb_any(skb);
3128                 return NETDEV_TX_OK;
3129         }
3130
3131         if (tso)
3132                 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3133         else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3134                 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3135
3136         ixgbevf_tx_queue(tx_ring, tx_flags,
3137                          ixgbevf_tx_map(tx_ring, skb, tx_flags),
3138                          first, skb->len, hdr_len);
3139
3140         writel(tx_ring->next_to_use, tx_ring->tail);
3141
3142         ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3143
3144         return NETDEV_TX_OK;
3145 }
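
/*
 * Transmit path summary (descriptive only): ixgbevf_xmit_frame() reserves
 * ring space with ixgbevf_maybe_stop_tx(), builds a context descriptor via
 * ixgbevf_tso() or ixgbevf_tx_csum(), DMA-maps the buffers with
 * ixgbevf_tx_map(), queues the data descriptors with ixgbevf_tx_queue()
 * and finally writes the tail register to hand the new descriptors to the
 * hardware.
 */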
3146
3147 /**
3148  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3149  * @netdev: network interface device structure
3150  * @p: pointer to an address structure
3151  *
3152  * Returns 0 on success, negative on failure
3153  **/
3154 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3155 {
3156         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3157         struct ixgbe_hw *hw = &adapter->hw;
3158         struct sockaddr *addr = p;
3159
3160         if (!is_valid_ether_addr(addr->sa_data))
3161                 return -EADDRNOTAVAIL;
3162
3163         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3164         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3165
3166         spin_lock_bh(&adapter->mbx_lock);
3167
3168         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3169
3170         spin_unlock_bh(&adapter->mbx_lock);
3171
3172         return 0;
3173 }
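
/*
 * Usage note (illustrative, not driver code): ixgbevf_set_mac() is wired
 * up as .ndo_set_mac_address below, so a userspace request such as
 *
 *	ip link set dev eth0 address 02:01:02:03:04:05
 *
 * lands here; the interface name and address are only examples.  The new
 * address is handed to the PF via hw->mac.ops.set_rar() under mbx_lock,
 * since RAR updates for a VF go through the PF mailbox.
 */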
3174
3175 /**
3176  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3177  * @netdev: network interface device structure
3178  * @new_mtu: new value for maximum frame size
3179  *
3180  * Returns 0 on success, negative on failure
3181  **/
3182 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3183 {
3184         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3185         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3186         int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3187
3188         switch (adapter->hw.api_version) {
3189         case ixgbe_mbox_api_11:
3190                 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3191                 break;
3192         default:
3193                 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3194                         max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3195                 break;
3196         }
3197
3198         /* MTU < 68 is an error and causes problems on some kernels */
3199         if ((new_mtu < 68) || (max_frame > max_possible_frame))
3200                 return -EINVAL;
3201
3202         hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3203                netdev->mtu, new_mtu);
3204         /* must set new MTU before calling down or up */
3205         netdev->mtu = new_mtu;
3206
3207         if (netif_running(netdev))
3208                 ixgbevf_reinit_locked(adapter);
3209
3210         return 0;
3211 }
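
/*
 * Example of the frame-size check above (assumes the standard 14 byte
 * Ethernet header and 4 byte FCS): new_mtu = 1500 gives max_frame = 1518,
 * which always fits; a jumbo MTU such as 9000 gives a 9018 byte frame and
 * is only accepted when the PF negotiated mailbox API 1.1 or the MAC is an
 * X540 VF, because only then is max_possible_frame raised to
 * IXGBE_MAX_JUMBO_FRAME_SIZE.
 */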
3212
3213 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3214 {
3215         struct net_device *netdev = pci_get_drvdata(pdev);
3216         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3217 #ifdef CONFIG_PM
3218         int retval = 0;
3219 #endif
3220
3221         netif_device_detach(netdev);
3222
3223         if (netif_running(netdev)) {
3224                 rtnl_lock();
3225                 ixgbevf_down(adapter);
3226                 ixgbevf_free_irq(adapter);
3227                 ixgbevf_free_all_tx_resources(adapter);
3228                 ixgbevf_free_all_rx_resources(adapter);
3229                 rtnl_unlock();
3230         }
3231
3232         ixgbevf_clear_interrupt_scheme(adapter);
3233
3234 #ifdef CONFIG_PM
3235         retval = pci_save_state(pdev);
3236         if (retval)
3237                 return retval;
3238
3239 #endif
3240         pci_disable_device(pdev);
3241
3242         return 0;
3243 }
3244
3245 #ifdef CONFIG_PM
3246 static int ixgbevf_resume(struct pci_dev *pdev)
3247 {
3248         struct net_device *netdev = pci_get_drvdata(pdev);
3249         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3250         u32 err;
3251
3252         pci_set_power_state(pdev, PCI_D0);
3253         pci_restore_state(pdev);
3254         /*
3255          * pci_restore_state clears dev->state_saved so call
3256          * pci_save_state to restore it.
3257          */
3258         pci_save_state(pdev);
3259
3260         err = pci_enable_device_mem(pdev);
3261         if (err) {
3262                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3263                 return err;
3264         }
3265         pci_set_master(pdev);
3266
3267         ixgbevf_reset(adapter);
3268
3269         rtnl_lock();
3270         err = ixgbevf_init_interrupt_scheme(adapter);
3271         rtnl_unlock();
3272         if (err) {
3273                 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3274                 return err;
3275         }
3276
3277         if (netif_running(netdev)) {
3278                 err = ixgbevf_open(netdev);
3279                 if (err)
3280                         return err;
3281         }
3282
3283         netif_device_attach(netdev);
3284
3285         return err;
3286 }
3287
3288 #endif /* CONFIG_PM */
3289 static void ixgbevf_shutdown(struct pci_dev *pdev)
3290 {
3291         ixgbevf_suspend(pdev, PMSG_SUSPEND);
3292 }
3293
3294 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3295                                                 struct rtnl_link_stats64 *stats)
3296 {
3297         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3298         unsigned int start;
3299         u64 bytes, packets;
3300         const struct ixgbevf_ring *ring;
3301         int i;
3302
3303         ixgbevf_update_stats(adapter);
3304
3305         stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3306
3307         for (i = 0; i < adapter->num_rx_queues; i++) {
3308                 ring = adapter->rx_ring[i];
3309                 do {
3310                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3311                         bytes = ring->total_bytes;
3312                         packets = ring->total_packets;
3313                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3314                 stats->rx_bytes += bytes;
3315                 stats->rx_packets += packets;
3316         }
3317
3318         for (i = 0; i < adapter->num_tx_queues; i++) {
3319                 ring = adapter->tx_ring[i];
3320                 do {
3321                         start = u64_stats_fetch_begin_bh(&ring->syncp);
3322                         bytes = ring->total_bytes;
3323                         packets = ring->total_packets;
3324                 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3325                 stats->tx_bytes += bytes;
3326                 stats->tx_packets += packets;
3327         }
3328
3329         return stats;
3330 }
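
/*
 * Note on the loops above: u64_stats_fetch_begin_bh()/_retry_bh() re-read
 * each ring's byte and packet counters until a consistent snapshot is
 * obtained, which matters on 32-bit hosts where 64-bit counter updates are
 * not atomic; on 64-bit builds the helpers are essentially free.
 */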
3331
3332 static const struct net_device_ops ixgbevf_netdev_ops = {
3333         .ndo_open               = ixgbevf_open,
3334         .ndo_stop               = ixgbevf_close,
3335         .ndo_start_xmit         = ixgbevf_xmit_frame,
3336         .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
3337         .ndo_get_stats64        = ixgbevf_get_stats,
3338         .ndo_validate_addr      = eth_validate_addr,
3339         .ndo_set_mac_address    = ixgbevf_set_mac,
3340         .ndo_change_mtu         = ixgbevf_change_mtu,
3341         .ndo_tx_timeout         = ixgbevf_tx_timeout,
3342         .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
3343         .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
3344 #ifdef CONFIG_NET_RX_BUSY_POLL
3345         .ndo_busy_poll          = ixgbevf_busy_poll_recv,
3346 #endif
3347 };
3348
3349 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3350 {
3351         dev->netdev_ops = &ixgbevf_netdev_ops;
3352         ixgbevf_set_ethtool_ops(dev);
3353         dev->watchdog_timeo = 5 * HZ;
3354 }
3355
3356 /**
3357  * ixgbevf_probe - Device Initialization Routine
3358  * @pdev: PCI device information struct
3359  * @ent: entry in ixgbevf_pci_tbl
3360  *
3361  * Returns 0 on success, negative on failure
3362  *
3363  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3364  * The OS initialization, configuring of the adapter private structure,
3365  * and a hardware reset occur.
3366  **/
3367 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3368 {
3369         struct net_device *netdev;
3370         struct ixgbevf_adapter *adapter = NULL;
3371         struct ixgbe_hw *hw = NULL;
3372         const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3373         static int cards_found;
3374         int err, pci_using_dac;
3375
3376         err = pci_enable_device(pdev);
3377         if (err)
3378                 return err;
3379
3380         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3381                 pci_using_dac = 1;
3382         } else {
3383                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3384                 if (err) {
3385                         dev_err(&pdev->dev, "No usable DMA "
3386                                 "configuration, aborting\n");
3387                         goto err_dma;
3388                 }
3389                 pci_using_dac = 0;
3390         }
3391
3392         err = pci_request_regions(pdev, ixgbevf_driver_name);
3393         if (err) {
3394                 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3395                 goto err_pci_reg;
3396         }
3397
3398         pci_set_master(pdev);
3399
3400         netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3401                                    MAX_TX_QUEUES);
3402         if (!netdev) {
3403                 err = -ENOMEM;
3404                 goto err_alloc_etherdev;
3405         }
3406
3407         SET_NETDEV_DEV(netdev, &pdev->dev);
3408
3409         pci_set_drvdata(pdev, netdev);
3410         adapter = netdev_priv(netdev);
3411
3412         adapter->netdev = netdev;
3413         adapter->pdev = pdev;
3414         hw = &adapter->hw;
3415         hw->back = adapter;
3416         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3417
3418         /*
3419          * call pci_save_state() here in the standalone driver because it
3420          * relies on the adapter struct to exist and needs to call netdev_priv()
3421          */
3422         pci_save_state(pdev);
3423
3424         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3425                               pci_resource_len(pdev, 0));
3426         if (!hw->hw_addr) {
3427                 err = -EIO;
3428                 goto err_ioremap;
3429         }
3430
3431         ixgbevf_assign_netdev_ops(netdev);
3432
3433         adapter->bd_number = cards_found;
3434
3435         /* Setup hw api */
3436         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3437         hw->mac.type  = ii->mac;
3438
3439         memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3440                sizeof(struct ixgbe_mbx_operations));
3441
3442         /* setup the private structure */
3443         err = ixgbevf_sw_init(adapter);
3444         if (err)
3445                 goto err_sw_init;
3446
3447         /* The HW MAC address was set and/or determined in sw_init */
3448         if (!is_valid_ether_addr(netdev->dev_addr)) {
3449                 pr_err("invalid MAC address\n");
3450                 err = -EIO;
3451                 goto err_sw_init;
3452         }
3453
3454         netdev->hw_features = NETIF_F_SG |
3455                            NETIF_F_IP_CSUM |
3456                            NETIF_F_IPV6_CSUM |
3457                            NETIF_F_TSO |
3458                            NETIF_F_TSO6 |
3459                            NETIF_F_RXCSUM;
3460
3461         netdev->features = netdev->hw_features |
3462                            NETIF_F_HW_VLAN_CTAG_TX |
3463                            NETIF_F_HW_VLAN_CTAG_RX |
3464                            NETIF_F_HW_VLAN_CTAG_FILTER;
3465
3466         netdev->vlan_features |= NETIF_F_TSO;
3467         netdev->vlan_features |= NETIF_F_TSO6;
3468         netdev->vlan_features |= NETIF_F_IP_CSUM;
3469         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3470         netdev->vlan_features |= NETIF_F_SG;
3471
3472         if (pci_using_dac)
3473                 netdev->features |= NETIF_F_HIGHDMA;
3474
3475         netdev->priv_flags |= IFF_UNICAST_FLT;
3476
3477         init_timer(&adapter->watchdog_timer);
3478         adapter->watchdog_timer.function = ixgbevf_watchdog;
3479         adapter->watchdog_timer.data = (unsigned long)adapter;
3480
3481         INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3482         INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3483
3484         err = ixgbevf_init_interrupt_scheme(adapter);
3485         if (err)
3486                 goto err_sw_init;
3487
3488         strcpy(netdev->name, "eth%d");
3489
3490         err = register_netdev(netdev);
3491         if (err)
3492                 goto err_register;
3493
3494         netif_carrier_off(netdev);
3495
3496         ixgbevf_init_last_counter_stats(adapter);
3497
3498         /* print the MAC address */
3499         hw_dbg(hw, "%pM\n", netdev->dev_addr);
3500
3501         hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3502
3503         hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3504         cards_found++;
3505         return 0;
3506
3507 err_register:
3508         ixgbevf_clear_interrupt_scheme(adapter);
3509 err_sw_init:
3510         ixgbevf_reset_interrupt_capability(adapter);
3511         iounmap(hw->hw_addr);
3512 err_ioremap:
3513         free_netdev(netdev);
3514 err_alloc_etherdev:
3515         pci_release_regions(pdev);
3516 err_pci_reg:
3517 err_dma:
3518         pci_disable_device(pdev);
3519         return err;
3520 }
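
/*
 * Error-handling note for ixgbevf_probe() above (descriptive): the goto
 * labels unwind in reverse order of setup -- err_register drops the
 * interrupt scheme, err_sw_init unmaps the BAR and resets interrupt
 * capability, err_ioremap frees the netdev, and err_alloc_etherdev /
 * err_pci_reg / err_dma release the PCI regions and disable the device --
 * so each failure point releases exactly what was acquired before it.
 */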
3521
3522 /**
3523  * ixgbevf_remove - Device Removal Routine
3524  * @pdev: PCI device information struct
3525  *
3526  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3527  * that it should release a PCI device.  This could be caused by a
3528  * Hot-Plug event, or because the driver is going to be removed from
3529  * memory.
3530  **/
3531 static void ixgbevf_remove(struct pci_dev *pdev)
3532 {
3533         struct net_device *netdev = pci_get_drvdata(pdev);
3534         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3535
3536         set_bit(__IXGBEVF_DOWN, &adapter->state);
3537
3538         del_timer_sync(&adapter->watchdog_timer);
3539
3540         cancel_work_sync(&adapter->reset_task);
3541         cancel_work_sync(&adapter->watchdog_task);
3542
3543         if (netdev->reg_state == NETREG_REGISTERED)
3544                 unregister_netdev(netdev);
3545
3546         ixgbevf_clear_interrupt_scheme(adapter);
3547         ixgbevf_reset_interrupt_capability(adapter);
3548
3549         iounmap(adapter->hw.hw_addr);
3550         pci_release_regions(pdev);
3551
3552         hw_dbg(&adapter->hw, "Remove complete\n");
3553
3554         free_netdev(netdev);
3555
3556         pci_disable_device(pdev);
3557 }
3558
3559 /**
3560  * ixgbevf_io_error_detected - called when PCI error is detected
3561  * @pdev: Pointer to PCI device
3562  * @state: The current pci connection state
3563  *
3564  * This function is called after a PCI bus error affecting
3565  * this device has been detected.
3566  */
3567 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3568                                                   pci_channel_state_t state)
3569 {
3570         struct net_device *netdev = pci_get_drvdata(pdev);
3571         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3572
3573         netif_device_detach(netdev);
3574
3575         if (state == pci_channel_io_perm_failure)
3576                 return PCI_ERS_RESULT_DISCONNECT;
3577
3578         if (netif_running(netdev))
3579                 ixgbevf_down(adapter);
3580
3581         pci_disable_device(pdev);
3582
3583         /* Request a slot reset. */
3584         return PCI_ERS_RESULT_NEED_RESET;
3585 }
3586
3587 /**
3588  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3589  * @pdev: Pointer to PCI device
3590  *
3591  * Restart the card from scratch, as if from a cold boot. Implementation
3592  * resembles the first half of the ixgbevf_resume routine.
3593  */
3594 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3595 {
3596         struct net_device *netdev = pci_get_drvdata(pdev);
3597         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3598
3599         if (pci_enable_device_mem(pdev)) {
3600                 dev_err(&pdev->dev,
3601                         "Cannot re-enable PCI device after reset.\n");
3602                 return PCI_ERS_RESULT_DISCONNECT;
3603         }
3604
3605         pci_set_master(pdev);
3606
3607         ixgbevf_reset(adapter);
3608
3609         return PCI_ERS_RESULT_RECOVERED;
3610 }
3611
3612 /**
3613  * ixgbevf_io_resume - called when traffic can start flowing again.
3614  * @pdev: Pointer to PCI device
3615  *
3616  * This callback is called when the error recovery driver tells us that
3617  * it's OK to resume normal operation. Implementation resembles the
3618  * second half of the ixgbevf_resume routine.
3619  */
3620 static void ixgbevf_io_resume(struct pci_dev *pdev)
3621 {
3622         struct net_device *netdev = pci_get_drvdata(pdev);
3623         struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3624
3625         if (netif_running(netdev))
3626                 ixgbevf_up(adapter);
3627
3628         netif_device_attach(netdev);
3629 }
3630
3631 /* PCI Error Recovery (ERS) */
3632 static const struct pci_error_handlers ixgbevf_err_handler = {
3633         .error_detected = ixgbevf_io_error_detected,
3634         .slot_reset = ixgbevf_io_slot_reset,
3635         .resume = ixgbevf_io_resume,
3636 };
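
/*
 * Recovery flow implemented by the handlers above (descriptive): on a bus
 * error the PCI core calls .error_detected, which detaches the netdev and,
 * unless the failure is permanent, asks for a slot reset; .slot_reset then
 * re-enables the device and resets the VF; once the core reports success,
 * .resume brings the interface back up and re-attaches it.
 */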
3637
3638 static struct pci_driver ixgbevf_driver = {
3639         .name     = ixgbevf_driver_name,
3640         .id_table = ixgbevf_pci_tbl,
3641         .probe    = ixgbevf_probe,
3642         .remove   = ixgbevf_remove,
3643 #ifdef CONFIG_PM
3644         /* Power Management Hooks */
3645         .suspend  = ixgbevf_suspend,
3646         .resume   = ixgbevf_resume,
3647 #endif
3648         .shutdown = ixgbevf_shutdown,
3649         .err_handler = &ixgbevf_err_handler
3650 };
3651
3652 /**
3653  * ixgbevf_init_module - Driver Registration Routine
3654  *
3655  * ixgbevf_init_module is the first routine called when the driver is
3656  * loaded. All it does is register with the PCI subsystem.
3657  **/
3658 static int __init ixgbevf_init_module(void)
3659 {
3660         int ret;
3661         pr_info("%s - version %s\n", ixgbevf_driver_string,
3662                 ixgbevf_driver_version);
3663
3664         pr_info("%s\n", ixgbevf_copyright);
3665
3666         ret = pci_register_driver(&ixgbevf_driver);
3667         return ret;
3668 }
3669
3670 module_init(ixgbevf_init_module);
3671
3672 /**
3673  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3674  *
3675  * ixgbevf_exit_module is called just before the driver is removed
3676  * from memory.
3677  **/
3678 static void __exit ixgbevf_exit_module(void)
3679 {
3680         pci_unregister_driver(&ixgbevf_driver);
3681 }
3682
3683 #ifdef DEBUG
3684 /**
3685  * ixgbevf_get_hw_dev_name - return device name string
3686  * used by hardware layer to print debugging information
3687  * used by the hardware layer to print debugging information
3688 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3689 {
3690         struct ixgbevf_adapter *adapter = hw->back;
3691         return adapter->netdev->name;
3692 }
3693
3694 #endif
3695 module_exit(ixgbevf_exit_module);
3696
3697 /* ixgbevf_main.c */