ixgbe: Add support for VLAN promiscuous with SR-IOV
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/vxlan.h>

#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.2.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2015 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598]           = &ixgbe_82598_info,
        [board_82599]           = &ixgbe_82599_info,
        [board_X540]            = &ixgbe_X540_info,
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
                 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
{
        struct pci_dev *parent_dev;
        struct pci_bus *parent_bus;

        parent_bus = adapter->pdev->bus->parent;
        if (!parent_bus)
                return -1;

        parent_dev = parent_bus->self;
        if (!parent_dev)
                return -1;

        if (!pci_is_pcie(parent_dev))
                return -1;

        pcie_capability_read_word(parent_dev, reg, value);
        if (*value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
                return -1;
        return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u16 link_status = 0;
        int err;

        hw->bus.type = ixgbe_bus_type_pci_express;

        /* Get the negotiated link width and speed from PCI config space of the
         * parent, as this device is behind a switch
         */
        err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
                                             &link_status);

        /* assume caller will handle error case */
        if (err)
                return err;

        hw->bus.width = ixgbe_convert_bus_width(link_status);
        hw->bus.speed = ixgbe_convert_bus_speed(link_status);

        return 0;
}
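
/* A minimal sketch of the Link Status decode that
 * ixgbe_convert_bus_width()/ixgbe_convert_bus_speed() (defined in
 * ixgbe_common.c) perform on the value read above. Illustrative only,
 * not part of the driver; field layout per <uapi/linux/pci_regs.h>:
 *
 *      speed = link_status & PCI_EXP_LNKSTA_CLS;
 *              (1 = 2.5GT/s, 2 = 5.0GT/s, 3 = 8.0GT/s)
 *      width = (link_status & PCI_EXP_LNKSTA_NLW) >>
 *              PCI_EXP_LNKSTA_NLW_SHIFT;
 */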

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
                return true;
        default:
                return false;
        }
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
                                     int expected_gts)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_gts = 0;
        enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        struct pci_dev *pdev;

        /* Some devices are not connected over PCIe and thus do not negotiate
         * speed. These devices do not have valid bus info, and thus any report
         * we generate may not be correct.
         */
        if (hw->bus.type == ixgbe_bus_type_internal)
                return;

        /* determine whether to use the parent device */
        if (ixgbe_pcie_from_parent(&adapter->hw))
                pdev = adapter->pdev->bus->parent->self;
        else
                pdev = adapter->pdev;

        if (pcie_get_minimum_link(pdev, &speed, &width) ||
            speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }

        switch (speed) {
        case PCIE_SPEED_2_5GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 2 * width;
                break;
        case PCIE_SPEED_5_0GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 4 * width;
                break;
        case PCIE_SPEED_8_0GT:
                /* 128b/130b encoding reduces throughput by less than 2% */
                max_gts = 8 * width;
                break;
        default:
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }

        e_dev_info("PCI Express bandwidth of %dGT/s available\n",
                   max_gts);
        e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
                   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
                    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
                    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
                    "Unknown"),
                   width,
                   (speed == PCIE_SPEED_2_5GT ? "20%" :
                    speed == PCIE_SPEED_5_0GT ? "20%" :
                    speed == PCIE_SPEED_8_0GT ? "<2%" :
                    "Unknown"));

        if (max_gts < expected_gts) {
                e_dev_warn("This is not sufficient for optimal performance of this card.\n");
                e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
                        expected_gts);
                e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
        }
}
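
/* Worked example: an x8 link at 5.0GT/s gives max_gts = 4 * 8 = 32GT/s
 * (5.0GT/s per lane minus the 20% 8b/10b encoding loss leaves 4GT/s
 * usable per lane). A caller passing expected_gts = 32 would just be
 * satisfied by that slot, while an x4 slot (max_gts = 16) would trigger
 * the warnings above.
 */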

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_bit(__IXGBE_REMOVING, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                queue_work(ixgbe_wq, &adapter->service_task);
}
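
/* Note: the test_and_set_bit() above makes scheduling idempotent.
 * Racing callers may all attempt to queue the service task, but only
 * the one that atomically flips __IXGBE_SERVICE_SCHED actually calls
 * queue_work(); the bit is cleared again by
 * ixgbe_service_event_complete() once the task has run.
 */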

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        e_dev_err("Adapter removed\n");
        if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
                ixgbe_service_event_schedule(adapter);
}

static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_STATUS) {
                ixgbe_remove_adapter(hw);
                return;
        }
        value = ixgbe_read_reg(hw, IXGBE_STATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbe_remove_adapter(hw);
}

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns: value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal: a read that returns all ones is confirmed by reading the
 * status register, which itself reads as all ones only if the device
 * has been removed. This function avoids reading from the hardware if
 * a removal was previously detected, in which case it returns
 * IXGBE_FAILED_READ_REG (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
        return value;
}
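
/* Usage sketch (hypothetical caller, not driver code): an all-ones
 * read is how PCIe surprise removal manifests, so code that polls a
 * register should bail out once the adapter is marked removed, e.g.:
 *
 *      u32 v = ixgbe_read_reg(hw, IXGBE_STATUS);
 *
 *      if (ixgbe_removed(hw->hw_addr))
 *              return;         device is gone; hw_addr was cleared
 */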

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
        u16 value;

        pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD) {
                ixgbe_remove_adapter(hw);
                return true;
        }
        return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u16 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_WORD;
        pci_read_config_word(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_WORD;
        return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u32 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_DWORD;
        pci_read_config_dword(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_DWORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_DWORD;
        return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (ixgbe_removed(hw->hw_addr))
                return;
        pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
        u32 ofs;
        char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

        /* General Registers */
        {IXGBE_CTRL, "CTRL"},
        {IXGBE_STATUS, "STATUS"},
        {IXGBE_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {IXGBE_EICR, "EICR"},

        /* RX Registers */
        {IXGBE_SRRCTL(0), "SRRCTL"},
        {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
        {IXGBE_RDLEN(0), "RDLEN"},
        {IXGBE_RDH(0), "RDH"},
        {IXGBE_RDT(0), "RDT"},
        {IXGBE_RXDCTL(0), "RXDCTL"},
        {IXGBE_RDBAL(0), "RDBAL"},
        {IXGBE_RDBAH(0), "RDBAH"},

        /* TX Registers */
        {IXGBE_TDBAL(0), "TDBAL"},
        {IXGBE_TDBAH(0), "TDBAH"},
        {IXGBE_TDLEN(0), "TDLEN"},
        {IXGBE_TDH(0), "TDH"},
        {IXGBE_TDT(0), "TDT"},
        {IXGBE_TXDCTL(0), "TXDCTL"},

        /* List Terminator */
        { .name = NULL }
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
        int i = 0, j = 0;
        char rname[16];
        u32 regs[64];

        switch (reginfo->ofs) {
        case IXGBE_SRRCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
                break;
        case IXGBE_DCA_RXCTRL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
                break;
        case IXGBE_RDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
                break;
        case IXGBE_RDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
                break;
        case IXGBE_RDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
                break;
        case IXGBE_RXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                break;
        case IXGBE_RDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
                break;
        case IXGBE_RDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
                break;
        case IXGBE_TDBAL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
                break;
        case IXGBE_TDBAH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
                break;
        case IXGBE_TDLEN(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
                break;
        case IXGBE_TDH(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
                break;
        case IXGBE_TDT(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
                break;
        case IXGBE_TXDCTL(0):
                for (i = 0; i < 64; i++)
                        regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
                break;
        default:
                pr_info("%-15s %08x\n", reginfo->name,
                        IXGBE_READ_REG(hw, reginfo->ofs));
                return;
        }

        for (i = 0; i < 8; i++) {
                snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
                pr_err("%-15s", rname);
                for (j = 0; j < 8; j++)
                        pr_cont(" %08x", regs[i*8+j]);
                pr_cont("\n");
        }
}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_reg_info *reginfo;
        int n = 0;
        struct ixgbe_ring *tx_ring;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        struct my_u0 { u64 a; u64 b; } *u0;
        struct ixgbe_ring *rx_ring;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        u32 staterr;
        int i = 0;

        if (!netif_msg_hw(adapter))
                return;

        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
                        "trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
                        netdev->trans_start,
                        netdev->last_rx);
        }

        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
        pr_info(" Register Name   Value\n");
        for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
             reginfo->name; reginfo++) {
                ixgbe_regdump(hw, reginfo);
        }

        /* Print TX Ring Summary */
        if (!netdev || !netif_running(netdev))
                return;

        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
        pr_info(" %s     %s              %s        %s\n",
                "Queue [NTU] [NTC] [bi(ntc)->dma  ]",
                "leng", "ntw", "timestamp");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
                           n, tx_ring->next_to_use, tx_ring->next_to_clean,
                           (u64)dma_unmap_addr(tx_buffer, dma),
                           dma_unmap_len(tx_buffer, len),
                           tx_buffer->next_to_watch,
                           (u64)tx_buffer->time_stamp);
        }

        /* Print TX Rings */
        if (!netif_msg_tx_done(adapter))
                goto rx_ring_summary;

        dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

        /* Transmit Descriptor Formats
         *
         * 82598 Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
         *   +--------------------------------------------------------------+
         *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
         *
         * 82598 Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |          NXTSEQ           |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         *
         * 82599+ Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
         *   +--------------------------------------------------------------+
         *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
         *
         * 82599+ Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |           RSV             |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         */

        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s    %s              %s        %s          %s\n",
                        "T [desc]     [address 63:0  ] ",
                        "[PlPOIdStDDt Ln] [bi->dma       ] ",
                        "leng", "ntw", "timestamp", "bi->skb");

                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        tx_desc = IXGBE_TX_DESC(tx_ring, i);
                        tx_buffer = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
                        if (dma_unmap_len(tx_buffer, len) > 0) {
                                pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
                                        i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)dma_unmap_addr(tx_buffer, dma),
                                        dma_unmap_len(tx_buffer, len),
                                        tx_buffer->next_to_watch,
                                        (u64)tx_buffer->time_stamp,
                                        tx_buffer->skb);
                                if (i == tx_ring->next_to_use &&
                                        i == tx_ring->next_to_clean)
                                        pr_cont(" NTC/U\n");
                                else if (i == tx_ring->next_to_use)
                                        pr_cont(" NTU\n");
                                else if (i == tx_ring->next_to_clean)
                                        pr_cont(" NTC\n");
                                else
                                        pr_cont("\n");

                                if (netif_msg_pktdata(adapter) &&
                                    tx_buffer->skb)
                                        print_hex_dump(KERN_INFO, "",
                                                DUMP_PREFIX_ADDRESS, 16, 1,
                                                tx_buffer->skb->data,
                                                dma_unmap_len(tx_buffer, len),
                                                true);
                        }
                }
        }

        /* Print RX Rings Summary */
rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
        pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("%5d %5X %5X\n",
                        n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }

        /* Print RX Rings */
        if (!netif_msg_rx_status(adapter))
                return;

        dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

        /* Receive Descriptor Formats
         *
         * 82598 Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82598 Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 16 15   4 3     0
         *   +------------------------------------------------------+
         * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
         *   | Packet   | IP     |   |          |     | Type | Type |
         *   | Checksum | Ident  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         *
         * 82599+ Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82599+ Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 17 16   4 3     0
         *   +------------------------------------------------------+
         * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS |
         *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type |
         *   |/ Flow Dir Flt ID  |   |          |     |      |      |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
         *   +------------------------------------------------------+
         *   63       48 47    32 31          20 19                 0
         */

        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
                pr_info("%s%s%s",
                        "R  [desc]      [ PktBuf     A0] ",
                        "[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
                        "<-- Adv Rx Read format\n");
                pr_info("%s%s%s",
                        "RWB[desc]      [PcsmIpSHl PtRs] ",
                        "[vl er S cks ln] ---------------- [bi->skb       ] ",
                        "<-- Adv Rx Write-Back format\n");

                for (i = 0; i < rx_ring->count; i++) {
                        rx_buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IXGBE_RX_DESC(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
                        if (staterr & IXGBE_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("RWB[0x%03X]     %016llX "
                                        "%016llX ---------------- %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        rx_buffer_info->skb);
                        } else {
                                pr_info("R  [0x%03X]     %016llX "
                                        "%016llX %016llX %p", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)rx_buffer_info->dma,
                                        rx_buffer_info->skb);

                                if (netif_msg_pktdata(adapter) &&
                                    rx_buffer_info->dma) {
                                        print_hex_dump(KERN_INFO, "",
                                           DUMP_PREFIX_ADDRESS, 16, 1,
                                           page_address(rx_buffer_info->page) +
                                                    rx_buffer_info->page_offset,
                                           ixgbe_rx_bufsz(rx_ring), true);
                                }
                        }

                        if (i == rx_ring->next_to_use)
                                pr_cont(" NTU\n");
                        else if (i == rx_ring->next_to_clean)
                                pr_cont(" NTC\n");
                        else
                                pr_cont("\n");
                }
        }
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}
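
/* Worked example (82599 and later): mapping Rx queue 5 (direction 0)
 * to MSI-X vector 3. index = (16 * (5 & 1)) + (8 * 0) = 16, so the
 * entry lands in byte lane 2 of IVAR(5 >> 1) = IVAR(2):
 *
 *      ivar &= ~(0xFF << 16);
 *      ivar |= ((3 | IXGBE_IVAR_ALLOC_VAL) << 16);
 *
 * Each IVAR register thus packs four 8-bit entries: the Rx and Tx
 * causes of two adjacent queues.
 */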

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
                break;
        default:
                break;
        }
}
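
/* Example: rearming queues 0 and 40 on 82599+ means
 * qmask = BIT_ULL(0) | BIT_ULL(40); the low word (0x00000001) is
 * written to EICS_EX(0) and the high word (0x00000100, i.e. bit
 * 40 - 32 = 8) to EICS_EX(1), raising a software interrupt for both
 * queues.
 */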

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
                                      struct ixgbe_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        int i;
        u32 data;

        if ((hw->fc.current_mode != ixgbe_fc_full) &&
            (hw->fc.current_mode != ixgbe_fc_rx_pause))
                return;

        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                break;
        default:
                data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
        }
        hwstats->lxoffrxc += data;

        /* refill credits (no tx hang) if we received xoff */
        if (!data)
                return;

        for (i = 0; i < adapter->num_tx_queues; i++)
                clear_bit(__IXGBE_HANG_CHECK_ARMED,
                          &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u32 xoff[8] = {0};
        u8 tc;
        int i;
        bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

        if (adapter->ixgbe_ieee_pfc)
                pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
                ixgbe_update_xoff_rx_lfc(adapter);
                return;
        }

        /* update stats for each tc, only valid with PFC enabled */
        for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
                u32 pxoffrxc;

                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
                        pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
                hwstats->pxoffrxc[i] += pxoffrxc;
                /* Get the TC for given UP */
                tc = netdev_get_prio_tc_map(adapter->netdev, i);
                xoff[tc] += pxoffrxc;
        }

        /* disarm tx queues that have received xoff frames */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

                tc = tx_ring->dcb_tc;
                if (xoff[tc])
                        clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
        return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
        u32 head, tail;

        if (ring->l2_accel_priv)
                adapter = ring->l2_accel_priv->real_adapter;
        else
                adapter = netdev_priv(ring->netdev);

        hw = &adapter->hw;
        head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
        tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}
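
/* Worked example: with ring->count = 512, head = 500 and tail = 10 the
 * ring has wrapped (head > tail), so the pending count is
 * tail + count - head = 10 + 512 - 500 = 22 descriptors that the
 * hardware has not yet consumed.
 */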
1038
1039 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1040 {
1041         u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1042         u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1043         u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1044
1045         clear_check_for_tx_hang(tx_ring);
1046
1047         /*
1048          * Check for a hung queue, but be thorough. This verifies
1049          * that a transmit has been completed since the previous
1050          * check AND there is at least one packet pending. The
1051          * ARMED bit is set to indicate a potential hang. The
1052          * bit is cleared if a pause frame is received to remove
1053          * false hang detection due to PFC or 802.3x frames. By
1054          * requiring this to fail twice we avoid races with
1055          * pfc clearing the ARMED bit and conditions where we
1056          * run the check_tx_hang logic with a transmit completion
1057          * pending but without time to complete it yet.
1058          */
1059         if (tx_done_old == tx_done && tx_pending)
1060                 /* make sure it is true for two checks in a row */
1061                 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1062                                         &tx_ring->state);
1063         /* update completed stats and continue */
1064         tx_ring->tx_stats.tx_done_old = tx_done;
1065         /* reset the countdown */
1066         clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1067
1068         return false;
1069 }
1070
1071 /**
1072  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
1073  * @adapter: driver private struct
1074  **/
1075 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1076 {
1077
1078         /* Do the reset outside of interrupt context */
1079         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1080                 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
1081                 e_warn(drv, "initiating reset due to tx timeout\n");
1082                 ixgbe_service_event_schedule(adapter);
1083         }
1084 }
1085
1086 /**
1087  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1088  * @q_vector: structure containing interrupt and ring information
1089  * @tx_ring: tx ring to clean
1090  **/
1091 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1092                                struct ixgbe_ring *tx_ring)
1093 {
1094         struct ixgbe_adapter *adapter = q_vector->adapter;
1095         struct ixgbe_tx_buffer *tx_buffer;
1096         union ixgbe_adv_tx_desc *tx_desc;
1097         unsigned int total_bytes = 0, total_packets = 0;
1098         unsigned int budget = q_vector->tx.work_limit;
1099         unsigned int i = tx_ring->next_to_clean;
1100
1101         if (test_bit(__IXGBE_DOWN, &adapter->state))
1102                 return true;
1103
1104         tx_buffer = &tx_ring->tx_buffer_info[i];
1105         tx_desc = IXGBE_TX_DESC(tx_ring, i);
1106         i -= tx_ring->count;
1107
1108         do {
1109                 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1110
1111                 /* if next_to_watch is not set then there is no work pending */
1112                 if (!eop_desc)
1113                         break;
1114
1115                 /* prevent any other reads prior to eop_desc */
1116                 read_barrier_depends();
1117
1118                 /* if DD is not set pending work has not been completed */
1119                 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1120                         break;
1121
1122                 /* clear next_to_watch to prevent false hangs */
1123                 tx_buffer->next_to_watch = NULL;
1124
1125                 /* update the statistics for this packet */
1126                 total_bytes += tx_buffer->bytecount;
1127                 total_packets += tx_buffer->gso_segs;
1128
1129                 /* free the skb */
1130                 dev_consume_skb_any(tx_buffer->skb);
1131
1132                 /* unmap skb header data */
1133                 dma_unmap_single(tx_ring->dev,
1134                                  dma_unmap_addr(tx_buffer, dma),
1135                                  dma_unmap_len(tx_buffer, len),
1136                                  DMA_TO_DEVICE);
1137
1138                 /* clear tx_buffer data */
1139                 tx_buffer->skb = NULL;
1140                 dma_unmap_len_set(tx_buffer, len, 0);
1141
1142                 /* unmap remaining buffers */
1143                 while (tx_desc != eop_desc) {
1144                         tx_buffer++;
1145                         tx_desc++;
1146                         i++;
1147                         if (unlikely(!i)) {
1148                                 i -= tx_ring->count;
1149                                 tx_buffer = tx_ring->tx_buffer_info;
1150                                 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1151                         }
1152
1153                         /* unmap any remaining paged data */
1154                         if (dma_unmap_len(tx_buffer, len)) {
1155                                 dma_unmap_page(tx_ring->dev,
1156                                                dma_unmap_addr(tx_buffer, dma),
1157                                                dma_unmap_len(tx_buffer, len),
1158                                                DMA_TO_DEVICE);
1159                                 dma_unmap_len_set(tx_buffer, len, 0);
1160                         }
1161                 }
1162
1163                 /* move us one more past the eop_desc for start of next pkt */
1164                 tx_buffer++;
1165                 tx_desc++;
1166                 i++;
1167                 if (unlikely(!i)) {
1168                         i -= tx_ring->count;
1169                         tx_buffer = tx_ring->tx_buffer_info;
1170                         tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1171                 }
1172
1173                 /* issue prefetch for next Tx descriptor */
1174                 prefetch(tx_desc);
1175
1176                 /* update budget accounting */
1177                 budget--;
1178         } while (likely(budget));
1179
1180         i += tx_ring->count;
1181         tx_ring->next_to_clean = i;
1182         u64_stats_update_begin(&tx_ring->syncp);
1183         tx_ring->stats.bytes += total_bytes;
1184         tx_ring->stats.packets += total_packets;
1185         u64_stats_update_end(&tx_ring->syncp);
1186         q_vector->tx.total_bytes += total_bytes;
1187         q_vector->tx.total_packets += total_packets;
1188
1189         if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1190                 /* schedule immediate reset if we believe we hung */
1191                 struct ixgbe_hw *hw = &adapter->hw;
1192                 e_err(drv, "Detected Tx Unit Hang\n"
1193                         "  Tx Queue             <%d>\n"
1194                         "  TDH, TDT             <%x>, <%x>\n"
1195                         "  next_to_use          <%x>\n"
1196                         "  next_to_clean        <%x>\n"
1197                         "tx_buffer_info[next_to_clean]\n"
1198                         "  time_stamp           <%lx>\n"
1199                         "  jiffies              <%lx>\n",
1200                         tx_ring->queue_index,
1201                         IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1202                         IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1203                         tx_ring->next_to_use, i,
1204                         tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1205
1206                 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1207
1208                 e_info(probe,
1209                        "tx hang %d detected on queue %d, resetting adapter\n",
1210                         adapter->tx_timeout_count + 1, tx_ring->queue_index);
1211
1212                 /* schedule immediate reset if we believe we hung */
1213                 ixgbe_tx_timeout_reset(adapter);
1214
1215                 /* the adapter is about to reset, no point in enabling stuff */
1216                 return true;
1217         }
1218
1219         netdev_tx_completed_queue(txring_txq(tx_ring),
1220                                   total_packets, total_bytes);
1221
1222 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1223         if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1224                      (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1225                 /* Make sure that anybody stopping the queue after this
1226                  * sees the new next_to_clean.
1227                  */
1228                 smp_mb();
1229                 if (__netif_subqueue_stopped(tx_ring->netdev,
1230                                              tx_ring->queue_index)
1231                     && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1232                         netif_wake_subqueue(tx_ring->netdev,
1233                                             tx_ring->queue_index);
1234                         ++tx_ring->tx_stats.restart_queue;
1235                 }
1236         }
1237
1238         return !!budget;
1239 }
1240
1241 #ifdef CONFIG_IXGBE_DCA
1242 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1243                                 struct ixgbe_ring *tx_ring,
1244                                 int cpu)
1245 {
1246         struct ixgbe_hw *hw = &adapter->hw;
1247         u32 txctrl = 0;
1248         u16 reg_offset;
1249
1250         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1251                 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1252
1253         switch (hw->mac.type) {
1254         case ixgbe_mac_82598EB:
1255                 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1256                 break;
1257         case ixgbe_mac_82599EB:
1258         case ixgbe_mac_X540:
1259                 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1260                 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1261                 break;
1262         default:
1263                 /* for unknown hardware do not write register */
1264                 return;
1265         }
1266
1267         /*
1268          * We can enable relaxed ordering for reads, but not writes when
1269          * DCA is enabled.  This is due to a known issue in some chipsets
1270          * which will cause the DCA tag to be cleared.
1271          */
1272         txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1273                   IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1274                   IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1275
1276         IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1277 }
1278
1279 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1280                                 struct ixgbe_ring *rx_ring,
1281                                 int cpu)
1282 {
1283         struct ixgbe_hw *hw = &adapter->hw;
1284         u32 rxctrl = 0;
1285         u8 reg_idx = rx_ring->reg_idx;
1286
1287         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1288                 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1289
1290         switch (hw->mac.type) {
1291         case ixgbe_mac_82599EB:
1292         case ixgbe_mac_X540:
1293                 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1294                 break;
1295         default:
1296                 break;
1297         }
1298
1299         /*
1300          * We can enable relaxed ordering for reads, but not writes when
1301          * DCA is enabled.  This is due to a known issue in some chipsets
1302          * which will cause the DCA tag to be cleared.
1303          */
1304         rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1305                   IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1306                   IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1307
1308         IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1309 }
1310
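/* Re-target DCA for every ring on this vector when its interrupt has
 * migrated to a different CPU; get_cpu()/put_cpu() keep us pinned
 * while the per-ring tags are rewritten.
 */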
1311 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1312 {
1313         struct ixgbe_adapter *adapter = q_vector->adapter;
1314         struct ixgbe_ring *ring;
1315         int cpu = get_cpu();
1316
1317         if (q_vector->cpu == cpu)
1318                 goto out_no_update;
1319
1320         ixgbe_for_each_ring(ring, q_vector->tx)
1321                 ixgbe_update_tx_dca(adapter, ring, cpu);
1322
1323         ixgbe_for_each_ring(ring, q_vector->rx)
1324                 ixgbe_update_rx_dca(adapter, ring, cpu);
1325
1326         q_vector->cpu = cpu;
1327 out_no_update:
1328         put_cpu();
1329 }
1330
1331 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1332 {
1333         int i;
1334
1335         /* always use CB2 mode, difference is masked in the CB driver */
1336         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1337                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1338                                 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1339         else
1340                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1341                                 IXGBE_DCA_CTRL_DCA_DISABLE);
1342
1343         for (i = 0; i < adapter->num_q_vectors; i++) {
1344                 adapter->q_vector[i]->cpu = -1;
1345                 ixgbe_update_dca(adapter->q_vector[i]);
1346         }
1347 }
1348
1349 static int __ixgbe_notify_dca(struct device *dev, void *data)
1350 {
1351         struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1352         unsigned long event = *(unsigned long *)data;
1353
1354         if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1355                 return 0;
1356
1357         switch (event) {
1358         case DCA_PROVIDER_ADD:
1359                 /* if we're already enabled, don't do it again */
1360                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1361                         break;
1362                 if (dca_add_requester(dev) == 0) {
1363                         adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1364                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1365                                         IXGBE_DCA_CTRL_DCA_MODE_CB2);
1366                         break;
1367                 }
1368                 /* Fall Through since DCA is disabled. */
1369         case DCA_PROVIDER_REMOVE:
1370                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1371                         dca_remove_requester(dev);
1372                         adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1373                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1374                                         IXGBE_DCA_CTRL_DCA_DISABLE);
1375                 }
1376                 break;
1377         }
1378
1379         return 0;
1380 }
1381
1382 #endif /* CONFIG_IXGBE_DCA */
1383
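/* RSS hash types that cover an L4 header; any other RSS type reported
 * by the descriptor is treated as an L3-only hash in ixgbe_rx_hash().
 */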
1384 #define IXGBE_RSS_L4_TYPES_MASK \
1385         ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1386          (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1387          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1388          (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1389
1390 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1391                                  union ixgbe_adv_rx_desc *rx_desc,
1392                                  struct sk_buff *skb)
1393 {
1394         u16 rss_type;
1395
1396         if (!(ring->netdev->features & NETIF_F_RXHASH))
1397                 return;
1398
1399         rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1400                    IXGBE_RXDADV_RSSTYPE_MASK;
1401
1402         if (!rss_type)
1403                 return;
1404
1405         skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1406                      (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1407                      PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1408 }
1409
1410 #ifdef IXGBE_FCOE
1411 /**
1412  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1413  * @ring: structure containing ring specific data
1414  * @rx_desc: advanced rx descriptor
1415  *
1416  * Returns: true if the packet is an FCoE packet
1417  */
1418 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1419                                     union ixgbe_adv_rx_desc *rx_desc)
1420 {
1421         __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1422
1423         return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1424                ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1425                 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1426                              IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1427 }
1428
1429 #endif /* IXGBE_FCOE */
1430 /**
1431  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1432  * @ring: structure containing ring specific data
1433  * @rx_desc: current Rx descriptor being processed
1434  * @skb: skb currently being received and modified
1435  **/
1436 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1437                                      union ixgbe_adv_rx_desc *rx_desc,
1438                                      struct sk_buff *skb)
1439 {
1440         __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1441         __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1442         bool encap_pkt = false;
1443
1444         skb_checksum_none_assert(skb);
1445
1446         /* Rx csum disabled */
1447         if (!(ring->netdev->features & NETIF_F_RXCSUM))
1448                 return;
1449
1450         if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1451             (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1452                 encap_pkt = true;
1453                 skb->encapsulation = 1;
1454         }
1455
1456         /* if IP and error */
1457         if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1458             ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1459                 ring->rx_stats.csum_err++;
1460                 return;
1461         }
1462
1463         if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1464                 return;
1465
1466         if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1467                 /*
1468                  * 82599 errata, UDP frames with a 0 checksum can be marked as
1469                  * checksum errors.
1470                  */
1471                 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1472                     test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1473                         return;
1474
1475                 ring->rx_stats.csum_err++;
1476                 return;
1477         }
1478
1479         /* It must be a TCP or UDP packet with a valid checksum */
1480         skb->ip_summed = CHECKSUM_UNNECESSARY;
1481         if (encap_pkt) {
1482                 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1483                         return;
1484
1485                 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1486                         ring->rx_stats.csum_err++;
1487                         return;
1488                 }
1489                 /* If we checked the outer header let the stack know */
1490                 skb->csum_level = 1;
1491         }
1492 }
1493
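/**
 * ixgbe_alloc_mapped_page - allocate and DMA-map a receive page
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer_info entry to populate
 *
 * Reuses the page already attached to @bi when there is one, otherwise
 * allocates a fresh page and maps it for device DMA.  Returns false
 * (and bumps alloc_rx_page_failed) if allocation or mapping fails.
 */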
1494 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1495                                     struct ixgbe_rx_buffer *bi)
1496 {
1497         struct page *page = bi->page;
1498         dma_addr_t dma;
1499
1500         /* since we are recycling buffers we should seldom need to alloc */
1501         if (likely(page))
1502                 return true;
1503
1504         /* alloc new page for storage */
1505         page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1506         if (unlikely(!page)) {
1507                 rx_ring->rx_stats.alloc_rx_page_failed++;
1508                 return false;
1509         }
1510
1511         /* map page for use */
1512         dma = dma_map_page(rx_ring->dev, page, 0,
1513                            ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1514
1515         /*
1516          * if mapping failed free memory back to system since
1517          * there isn't much point in holding memory we can't use
1518          */
1519         if (dma_mapping_error(rx_ring->dev, dma)) {
1520                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1521
1522                 rx_ring->rx_stats.alloc_rx_page_failed++;
1523                 return false;
1524         }
1525
1526         bi->dma = dma;
1527         bi->page = page;
1528         bi->page_offset = 0;
1529
1530         return true;
1531 }
1532
1533 /**
1534  * ixgbe_alloc_rx_buffers - Replace used receive buffers
1535  * @rx_ring: ring to place buffers on
1536  * @cleaned_count: number of buffers to replace
1537  **/
1538 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1539 {
1540         union ixgbe_adv_rx_desc *rx_desc;
1541         struct ixgbe_rx_buffer *bi;
1542         u16 i = rx_ring->next_to_use;
1543
1544         /* nothing to do */
1545         if (!cleaned_count)
1546                 return;
1547
1548         rx_desc = IXGBE_RX_DESC(rx_ring, i);
1549         bi = &rx_ring->rx_buffer_info[i];
1550         i -= rx_ring->count;
1551
1552         do {
1553                 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1554                         break;
1555
1556                 /*
1557                  * Refresh the desc even if buffer_addrs didn't change
1558                  * because each write-back erases this info.
1559                  */
1560                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1561
1562                 rx_desc++;
1563                 bi++;
1564                 i++;
1565                 if (unlikely(!i)) {
1566                         rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1567                         bi = rx_ring->rx_buffer_info;
1568                         i -= rx_ring->count;
1569                 }
1570
1571                 /* clear the status bits for the next_to_use descriptor */
1572                 rx_desc->wb.upper.status_error = 0;
1573
1574                 cleaned_count--;
1575         } while (cleaned_count);
1576
1577         i += rx_ring->count;
1578
1579         if (rx_ring->next_to_use != i) {
1580                 rx_ring->next_to_use = i;
1581
1582                 /* update next to alloc since we have filled the ring */
1583                 rx_ring->next_to_alloc = i;
1584
1585                 /* Force memory writes to complete before letting h/w
1586                  * know there are new descriptors to fetch.  (Only
1587                  * applicable for weak-ordered memory model archs,
1588                  * such as IA-64).
1589                  */
1590                 wmb();
1591                 writel(i, rx_ring->tail);
1592         }
1593 }
1594
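/* Give RSC-coalesced frames a plausible gso_size so the stack's MSS
 * accounting is not skewed: payload bytes divided by the number of
 * coalesced frames.
 */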
1595 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1596                                    struct sk_buff *skb)
1597 {
1598         u16 hdr_len = skb_headlen(skb);
1599
1600         /* set gso_size to avoid messing up TCP MSS */
1601         skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1602                                                  IXGBE_CB(skb)->append_cnt);
1603         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1604 }
1605
1606 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1607                                    struct sk_buff *skb)
1608 {
1609         /* if append_cnt is 0 then frame is not RSC */
1610         if (!IXGBE_CB(skb)->append_cnt)
1611                 return;
1612
1613         rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1614         rx_ring->rx_stats.rsc_flush++;
1615
1616         ixgbe_set_rsc_gso_size(rx_ring, skb);
1617
1618         /* gso_size is computed using append_cnt so always clear it last */
1619         IXGBE_CB(skb)->append_cnt = 0;
1620 }
1621
1622 /**
1623  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1624  * @rx_ring: rx descriptor ring packet is being transacted on
1625  * @rx_desc: pointer to the EOP Rx descriptor
1626  * @skb: pointer to current skb being populated
1627  *
1628  * This function checks the ring, descriptor, and packet information in
1629  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1630  * other fields within the skb.
1631  **/
1632 static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1633                                      union ixgbe_adv_rx_desc *rx_desc,
1634                                      struct sk_buff *skb)
1635 {
1636         struct net_device *dev = rx_ring->netdev;
1637         u32 flags = rx_ring->q_vector->adapter->flags;
1638
1639         ixgbe_update_rsc_stats(rx_ring, skb);
1640
1641         ixgbe_rx_hash(rx_ring, rx_desc, skb);
1642
1643         ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1644
1645         if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1646                 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1647
1648         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1649             ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1650                 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1651                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1652         }
1653
1654         skb_record_rx_queue(skb, rx_ring->queue_index);
1655
1656         skb->protocol = eth_type_trans(skb, dev);
1657 }
1658
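/* Deliver a completed skb: directly to the stack when a busy-poll
 * context owns this vector, through GRO otherwise.
 */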
1659 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1660                          struct sk_buff *skb)
1661 {
1662         skb_mark_napi_id(skb, &q_vector->napi);
1663         if (ixgbe_qv_busy_polling(q_vector))
1664                 netif_receive_skb(skb);
1665         else
1666                 napi_gro_receive(&q_vector->napi, skb);
1667 }
1668
1669 /**
1670  * ixgbe_is_non_eop - process handling of non-EOP buffers
1671  * @rx_ring: Rx ring being processed
1672  * @rx_desc: Rx descriptor for current buffer
1673  * @skb: Current socket buffer containing buffer in progress
1674  *
1675  * This function updates next to clean.  If the buffer is an EOP buffer
1676  * this function exits returning false, otherwise it will place the
1677  * sk_buff in the next buffer to be chained and return true indicating
1678  * that this is in fact a non-EOP buffer.
1679  **/
1680 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1681                              union ixgbe_adv_rx_desc *rx_desc,
1682                              struct sk_buff *skb)
1683 {
1684         u32 ntc = rx_ring->next_to_clean + 1;
1685
1686         /* fetch, update, and store next to clean */
1687         ntc = (ntc < rx_ring->count) ? ntc : 0;
1688         rx_ring->next_to_clean = ntc;
1689
1690         prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1691
1692         /* update RSC append count if present */
1693         if (ring_is_rsc_enabled(rx_ring)) {
1694                 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1695                                      cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1696
1697                 if (unlikely(rsc_enabled)) {
1698                         u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1699
1700                         rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1701                         IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1702
1703                         /* update ntc based on RSC value */
1704                         ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1705                         ntc &= IXGBE_RXDADV_NEXTP_MASK;
1706                         ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1707                 }
1708         }
1709
1710         /* if we are the last buffer then there is nothing else to do */
1711         if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1712                 return false;
1713
1714         /* place skb in next buffer to be received */
1715         rx_ring->rx_buffer_info[ntc].skb = skb;
1716         rx_ring->rx_stats.non_eop_descs++;
1717
1718         return true;
1719 }
1720
1721 /**
1722  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1723  * @rx_ring: rx descriptor ring packet is being transacted on
1724  * @skb: pointer to current skb being adjusted
1725  *
1726  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1727  * main difference between this version and the original function is that
1728  * this function can make several assumptions about the state of things
1729  * that allow for significant optimizations versus the standard function.
1730  * As a result we can do things like drop a frag and maintain an accurate
1731  * truesize for the skb.
1732  */
1733 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1734                             struct sk_buff *skb)
1735 {
1736         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1737         unsigned char *va;
1738         unsigned int pull_len;
1739
1740         /*
1741          * it is valid to use page_address instead of kmap since we are
1742          * working with pages allocated out of the lowmem pool via
1743          * alloc_page(GFP_ATOMIC)
1744          */
1745         va = skb_frag_address(frag);
1746
1747         /*
1748          * we need the header to contain the greater of either ETH_HLEN
1749          * or, when skb->len is less than 60, the 60 bytes skb_pad needs.
1750          */
1751         pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1752
1753         /* align pull length to size of long to optimize memcpy performance */
1754         skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1755
1756         /* update all of the pointers */
1757         skb_frag_size_sub(frag, pull_len);
1758         frag->page_offset += pull_len;
1759         skb->data_len -= pull_len;
1760         skb->tail += pull_len;
1761 }
1762
1763 /**
1764  * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
1765  * @rx_ring: rx descriptor ring packet is being transacted on
1766  * @skb: pointer to current skb being updated
1767  *
1768  * This function provides a basic DMA sync up for the first fragment of an
1769  * skb.  The reason for doing this is that the first fragment cannot be
1770  * unmapped until we have reached the end of packet descriptor for a buffer
1771  * chain.
1772  */
1773 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1774                                 struct sk_buff *skb)
1775 {
1776         /* if the page was released unmap it, else just sync our portion */
1777         if (unlikely(IXGBE_CB(skb)->page_released)) {
1778                 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1779                                ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1780                 IXGBE_CB(skb)->page_released = false;
1781         } else {
1782                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1783
1784                 dma_sync_single_range_for_cpu(rx_ring->dev,
1785                                               IXGBE_CB(skb)->dma,
1786                                               frag->page_offset,
1787                                               ixgbe_rx_bufsz(rx_ring),
1788                                               DMA_FROM_DEVICE);
1789         }
1790         IXGBE_CB(skb)->dma = 0;
1791 }
1792
1793 /**
1794  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1795  * @rx_ring: rx descriptor ring packet is being transacted on
1796  * @rx_desc: pointer to the EOP Rx descriptor
1797  * @skb: pointer to current skb being fixed
1798  *
1799  * Check for corrupted packet headers caused by senders on the local L2
1800  * embedded NIC switch not setting up their Tx Descriptors right.  These
1801  * should be very rare.
1802  *
1803  * Also address the case where we are pulling data in on pages only
1804  * and as such no data is present in the skb header.
1805  *
1806  * In addition if skb is not at least 60 bytes we need to pad it so that
1807  * it is large enough to qualify as a valid Ethernet frame.
1808  *
1809  * Returns true if an error was encountered and skb was freed.
1810  **/
1811 static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1812                                   union ixgbe_adv_rx_desc *rx_desc,
1813                                   struct sk_buff *skb)
1814 {
1815         struct net_device *netdev = rx_ring->netdev;
1816
1817         /* verify that the packet does not have any known errors */
1818         if (unlikely(ixgbe_test_staterr(rx_desc,
1819                                         IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1820             !(netdev->features & NETIF_F_RXALL))) {
1821                 dev_kfree_skb_any(skb);
1822                 return true;
1823         }
1824
1825         /* place header in linear portion of buffer */
1826         if (skb_is_nonlinear(skb))
1827                 ixgbe_pull_tail(rx_ring, skb);
1828
1829 #ifdef IXGBE_FCOE
1830         /* do not attempt to pad FCoE Frames as this will disrupt DDP */
1831         if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1832                 return false;
1833
1834 #endif
1835         /* if eth_skb_pad returns an error the skb was freed */
1836         if (eth_skb_pad(skb))
1837                 return true;
1838
1839         return false;
1840 }
1841
1842 /**
1843  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1844  * @rx_ring: rx descriptor ring to store buffers on
1845  * @old_buff: donor buffer to have page reused
1846  *
1847  * Synchronizes page for reuse by the adapter
1848  **/
1849 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1850                                 struct ixgbe_rx_buffer *old_buff)
1851 {
1852         struct ixgbe_rx_buffer *new_buff;
1853         u16 nta = rx_ring->next_to_alloc;
1854
1855         new_buff = &rx_ring->rx_buffer_info[nta];
1856
1857         /* update, and store next to alloc */
1858         nta++;
1859         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1860
1861         /* transfer page from old buffer to new buffer */
1862         *new_buff = *old_buff;
1863
1864         /* sync the buffer for use by the device */
1865         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1866                                          new_buff->page_offset,
1867                                          ixgbe_rx_bufsz(rx_ring),
1868                                          DMA_FROM_DEVICE);
1869 }
1870
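/* Pages from a remote NUMA node or from the pfmemalloc emergency
 * reserves are never recycled onto the ring; they go back to the page
 * allocator instead.
 */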
1871 static inline bool ixgbe_page_is_reserved(struct page *page)
1872 {
1873         return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1874 }
1875
1876 /**
1877  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1878  * @rx_ring: rx descriptor ring to transact packets on
1879  * @rx_buffer: buffer containing page to add
1880  * @rx_desc: descriptor containing length of buffer written by hardware
1881  * @skb: sk_buff to place the data into
1882  *
1883  * This function will add the data contained in rx_buffer->page to the skb.
1884  * This is done either through a direct copy if the data in the buffer is
1885  * less than the skb header size, otherwise it will just attach the page as
1886  * a frag to the skb.
1887  *
1888  * The function will then update the page offset if necessary and return
1889  * true if the buffer can be reused by the adapter.
1890  **/
1891 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1892                               struct ixgbe_rx_buffer *rx_buffer,
1893                               union ixgbe_adv_rx_desc *rx_desc,
1894                               struct sk_buff *skb)
1895 {
1896         struct page *page = rx_buffer->page;
1897         unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1898 #if (PAGE_SIZE < 8192)
1899         unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1900 #else
1901         unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1902         unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1903                                    ixgbe_rx_bufsz(rx_ring);
1904 #endif
1905
1906         if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1907                 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1908
1909                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1910
1911                 /* page is not reserved, we can reuse buffer as-is */
1912                 if (likely(!ixgbe_page_is_reserved(page)))
1913                         return true;
1914
1915                 /* this page cannot be reused so discard it */
1916                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1917                 return false;
1918         }
1919
1920         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1921                         rx_buffer->page_offset, size, truesize);
1922
1923         /* avoid re-using remote pages */
1924         if (unlikely(ixgbe_page_is_reserved(page)))
1925                 return false;
1926
1927 #if (PAGE_SIZE < 8192)
1928         /* if we are only owner of page we can reuse it */
1929         if (unlikely(page_count(page) != 1))
1930                 return false;
1931
1932         /* flip page offset to other buffer */
1933         rx_buffer->page_offset ^= truesize;
1934 #else
1935         /* move offset up to the next cache line */
1936         rx_buffer->page_offset += truesize;
1937
1938         if (rx_buffer->page_offset > last_offset)
1939                 return false;
1940 #endif
1941
1942         /* Even if we own the page, we are not allowed to use atomic_set()
1943          * This would break get_page_unless_zero() users.
1944          */
1945         atomic_inc(&page->_count);
1946
1947         return true;
1948 }
1949
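/**
 * ixgbe_fetch_rx_buffer - retrieve or start the skb for a descriptor
 * @rx_ring: ring the descriptor lives on
 * @rx_desc: descriptor written back by hardware
 *
 * Allocates a new header skb for the first buffer of a frame, or picks
 * up the in-progress skb for chained buffers, syncs the page for CPU
 * access, then either recycles the half-page back to the ring or
 * unmaps it when it cannot be reused.
 */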
1950 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1951                                              union ixgbe_adv_rx_desc *rx_desc)
1952 {
1953         struct ixgbe_rx_buffer *rx_buffer;
1954         struct sk_buff *skb;
1955         struct page *page;
1956
1957         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1958         page = rx_buffer->page;
1959         prefetchw(page);
1960
1961         skb = rx_buffer->skb;
1962
1963         if (likely(!skb)) {
1964                 void *page_addr = page_address(page) +
1965                                   rx_buffer->page_offset;
1966
1967                 /* prefetch first cache line of first page */
1968                 prefetch(page_addr);
1969 #if L1_CACHE_BYTES < 128
1970                 prefetch(page_addr + L1_CACHE_BYTES);
1971 #endif
1972
1973                 /* allocate a skb to store the frags */
1974                 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1975                                      IXGBE_RX_HDR_SIZE);
1976                 if (unlikely(!skb)) {
1977                         rx_ring->rx_stats.alloc_rx_buff_failed++;
1978                         return NULL;
1979                 }
1980
1981                 /*
1982                  * we will be copying header into skb->data in
1983                  * pskb_may_pull so it is in our interest to prefetch
1984                  * it now to avoid a possible cache miss
1985                  */
1986                 prefetchw(skb->data);
1987
1988                 /*
1989                  * Delay unmapping of the first packet. It carries the
1990                  * header information, HW may still access the header
1991                  * after the writeback.  Only unmap it when EOP is
1992                  * reached
1993                  */
1994                 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1995                         goto dma_sync;
1996
1997                 IXGBE_CB(skb)->dma = rx_buffer->dma;
1998         } else {
1999                 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2000                         ixgbe_dma_sync_frag(rx_ring, skb);
2001
2002 dma_sync:
2003                 /* we are reusing so sync this buffer for CPU use */
2004                 dma_sync_single_range_for_cpu(rx_ring->dev,
2005                                               rx_buffer->dma,
2006                                               rx_buffer->page_offset,
2007                                               ixgbe_rx_bufsz(rx_ring),
2008                                               DMA_FROM_DEVICE);
2009
2010                 rx_buffer->skb = NULL;
2011         }
2012
2013         /* pull page into skb */
2014         if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
2015                 /* hand second half of page back to the ring */
2016                 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2017         } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
2018                 /* the page has been released from the ring */
2019                 IXGBE_CB(skb)->page_released = true;
2020         } else {
2021                 /* we are not reusing the buffer so unmap it */
2022                 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
2023                                ixgbe_rx_pg_size(rx_ring),
2024                                DMA_FROM_DEVICE);
2025         }
2026
2027         /* clear contents of buffer_info */
2028         rx_buffer->page = NULL;
2029
2030         return skb;
2031 }
2032
2033 /**
2034  * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2035  * @q_vector: structure containing interrupt and ring information
2036  * @rx_ring: rx descriptor ring to transact packets on
2037  * @budget: Total limit on number of packets to process
2038  *
2039  * This function provides a "bounce buffer" approach to Rx interrupt
2040  * processing.  The advantage to this is that on systems that have
2041  * expensive overhead for IOMMU access, this provides a means of avoiding
2042  * it by maintaining the mapping of the page to the system.
2043  *
2044  * Returns amount of work completed
2045  **/
2046 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2047                                struct ixgbe_ring *rx_ring,
2048                                const int budget)
2049 {
2050         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2051 #ifdef IXGBE_FCOE
2052         struct ixgbe_adapter *adapter = q_vector->adapter;
2053         int ddp_bytes;
2054         unsigned int mss = 0;
2055 #endif /* IXGBE_FCOE */
2056         u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2057
2058         while (likely(total_rx_packets < budget)) {
2059                 union ixgbe_adv_rx_desc *rx_desc;
2060                 struct sk_buff *skb;
2061
2062                 /* return some buffers to hardware, one at a time is too slow */
2063                 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2064                         ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2065                         cleaned_count = 0;
2066                 }
2067
2068                 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2069
2070                 if (!rx_desc->wb.upper.status_error)
2071                         break;
2072
2073                 /* This memory barrier is needed to keep us from reading
2074                  * any other fields out of the rx_desc until we know the
2075                  * descriptor has been written back
2076                  */
2077                 dma_rmb();
2078
2079                 /* retrieve a buffer from the ring */
2080                 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
2081
2082                 /* exit if we failed to retrieve a buffer */
2083                 if (!skb)
2084                         break;
2085
2086                 cleaned_count++;
2087
2088                 /* place incomplete frames back on ring for completion */
2089                 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2090                         continue;
2091
2092                 /* verify the packet layout is correct */
2093                 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2094                         continue;
2095
2096                 /* probably a little skewed due to removing CRC */
2097                 total_rx_bytes += skb->len;
2098
2099                 /* populate checksum, timestamp, VLAN, and protocol */
2100                 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2101
2102 #ifdef IXGBE_FCOE
2103                 /* if ddp, not passing to ULD unless for FCP_RSP or error */
2104                 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2105                         ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2106                         /* include DDPed FCoE data */
2107                         if (ddp_bytes > 0) {
2108                                 if (!mss) {
2109                                         mss = rx_ring->netdev->mtu -
2110                                                 sizeof(struct fcoe_hdr) -
2111                                                 sizeof(struct fc_frame_header) -
2112                                                 sizeof(struct fcoe_crc_eof);
2113                                         if (mss > 512)
2114                                                 mss &= ~511;
2115                                 }
2116                                 total_rx_bytes += ddp_bytes;
2117                                 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2118                                                                  mss);
2119                         }
2120                         if (!ddp_bytes) {
2121                                 dev_kfree_skb_any(skb);
2122                                 continue;
2123                         }
2124                 }
2125
2126 #endif /* IXGBE_FCOE */
2127                 ixgbe_rx_skb(q_vector, skb);
2128
2129                 /* update budget accounting */
2130                 total_rx_packets++;
2131         }
2132
2133         u64_stats_update_begin(&rx_ring->syncp);
2134         rx_ring->stats.packets += total_rx_packets;
2135         rx_ring->stats.bytes += total_rx_bytes;
2136         u64_stats_update_end(&rx_ring->syncp);
2137         q_vector->rx.total_packets += total_rx_packets;
2138         q_vector->rx.total_bytes += total_rx_bytes;
2139
2140         return total_rx_packets;
2141 }
2142
2143 #ifdef CONFIG_NET_RX_BUSY_POLL
2144 /* must be called with local_bh_disable()d */
2145 static int ixgbe_low_latency_recv(struct napi_struct *napi)
2146 {
2147         struct ixgbe_q_vector *q_vector =
2148                         container_of(napi, struct ixgbe_q_vector, napi);
2149         struct ixgbe_adapter *adapter = q_vector->adapter;
2150         struct ixgbe_ring  *ring;
2151         int found = 0;
2152
2153         if (test_bit(__IXGBE_DOWN, &adapter->state))
2154                 return LL_FLUSH_FAILED;
2155
2156         if (!ixgbe_qv_lock_poll(q_vector))
2157                 return LL_FLUSH_BUSY;
2158
2159         ixgbe_for_each_ring(ring, q_vector->rx) {
2160                 found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2161 #ifdef BP_EXTENDED_STATS
2162                 if (found)
2163                         ring->stats.cleaned += found;
2164                 else
2165                         ring->stats.misses++;
2166 #endif
2167                 if (found)
2168                         break;
2169         }
2170
2171         ixgbe_qv_unlock_poll(q_vector);
2172
2173         return found;
2174 }
2175 #endif  /* CONFIG_NET_RX_BUSY_POLL */
2176
2177 /**
2178  * ixgbe_configure_msix - Configure MSI-X hardware
2179  * @adapter: board private structure
2180  *
2181  * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2182  * interrupts.
2183  **/
2184 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2185 {
2186         struct ixgbe_q_vector *q_vector;
2187         int v_idx;
2188         u32 mask;
2189
2190         /* Populate MSIX to EITR Select */
2191         if (adapter->num_vfs > 32) {
2192                 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2193                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2194         }
2195
2196         /*
2197          * Populate the IVAR table and set the ITR values to the
2198          * corresponding register.
2199          */
2200         for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2201                 struct ixgbe_ring *ring;
2202                 q_vector = adapter->q_vector[v_idx];
2203
2204                 ixgbe_for_each_ring(ring, q_vector->rx)
2205                         ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2206
2207                 ixgbe_for_each_ring(ring, q_vector->tx)
2208                         ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2209
2210                 ixgbe_write_eitr(q_vector);
2211         }
2212
2213         switch (adapter->hw.mac.type) {
2214         case ixgbe_mac_82598EB:
2215                 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2216                                v_idx);
2217                 break;
2218         case ixgbe_mac_82599EB:
2219         case ixgbe_mac_X540:
2220         case ixgbe_mac_X550:
2221         case ixgbe_mac_X550EM_x:
2222                 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2223                 break;
2224         default:
2225                 break;
2226         }
2227         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2228
2229         /* set up to autoclear timer, and the vectors */
2230         mask = IXGBE_EIMS_ENABLE_MASK;
2231         mask &= ~(IXGBE_EIMS_OTHER |
2232                   IXGBE_EIMS_MAILBOX |
2233                   IXGBE_EIMS_LSC);
2234
2235         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2236 }
2237
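/* Coarse traffic classes used by the dynamic ITR algorithm below. */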
2238 enum latency_range {
2239         lowest_latency = 0,
2240         low_latency = 1,
2241         bulk_latency = 2,
2242         latency_invalid = 255
2243 };
2244
2245 /**
2246  * ixgbe_update_itr - update the dynamic ITR value based on statistics
2247  * @q_vector: structure containing interrupt and ring information
2248  * @ring_container: structure containing ring performance data
2249  *
2250  *      Stores a new ITR value based on packets and byte
2251  *      counts during the last interrupt.  The advantage of per interrupt
2252  *      computation is faster updates and more accurate ITR for the current
2253  *      traffic pattern.  Constants in this function were computed
2254  *      based on theoretical maximum wire speed and thresholds were set based
2255  *      on testing data as well as attempting to minimize response time
2256  *      while increasing bulk throughput.
2257  *      this functionality is controlled by the InterruptThrottleRate module
2258  *      parameter (see ixgbe_param.c)
2259  **/
2260 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2261                              struct ixgbe_ring_container *ring_container)
2262 {
2263         int bytes = ring_container->total_bytes;
2264         int packets = ring_container->total_packets;
2265         u32 timepassed_us;
2266         u64 bytes_perint;
2267         u8 itr_setting = ring_container->itr;
2268
2269         if (packets == 0)
2270                 return;
2271
2272         /* simple throttlerate management
2273          *   0-10MB/s   lowest (100000 ints/s)
2274          *  10-20MB/s   low    (20000 ints/s)
2275          *  20-1249MB/s bulk   (12000 ints/s)
2276          */
2277         /* what was last interrupt timeslice? */
2278         timepassed_us = q_vector->itr >> 2;
2279         if (timepassed_us == 0)
2280                 return;
2281
2282         bytes_perint = bytes / timepassed_us; /* bytes/usec */
2283
2284         switch (itr_setting) {
2285         case lowest_latency:
2286                 if (bytes_perint > 10)
2287                         itr_setting = low_latency;
2288                 break;
2289         case low_latency:
2290                 if (bytes_perint > 20)
2291                         itr_setting = bulk_latency;
2292                 else if (bytes_perint <= 10)
2293                         itr_setting = lowest_latency;
2294                 break;
2295         case bulk_latency:
2296                 if (bytes_perint <= 20)
2297                         itr_setting = low_latency;
2298                 break;
2299         }
2300
2301         /* clear work counters since we have the values we need */
2302         ring_container->total_bytes = 0;
2303         ring_container->total_packets = 0;
2304
2305         /* write updated itr to ring container */
2306         ring_container->itr = itr_setting;
2307 }
2308
2309 /**
2310  * ixgbe_write_eitr - write EITR register in hardware specific way
2311  * @q_vector: structure containing interrupt and ring information
2312  *
2313  * This function is made to be called by ethtool and by the driver
2314  * when it needs to update EITR registers at runtime.  Hardware
2315  * specific quirks/differences are taken care of here.
2316  */
2317 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2318 {
2319         struct ixgbe_adapter *adapter = q_vector->adapter;
2320         struct ixgbe_hw *hw = &adapter->hw;
2321         int v_idx = q_vector->v_idx;
2322         u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2323
2324         switch (adapter->hw.mac.type) {
2325         case ixgbe_mac_82598EB:
2326                 /* must write high and low 16 bits to reset counter */
2327                 itr_reg |= (itr_reg << 16);
2328                 break;
2329         case ixgbe_mac_82599EB:
2330         case ixgbe_mac_X540:
2331         case ixgbe_mac_X550:
2332         case ixgbe_mac_X550EM_x:
2333                 /*
2334                  * set the WDIS bit so this write does not clear the timer
2335                  * bits and cause an immediate assertion of the interrupt
2336                  */
2337                 itr_reg |= IXGBE_EITR_CNT_WDIS;
2338                 break;
2339         default:
2340                 break;
2341         }
2342         IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2343 }
2344
2345 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2346 {
2347         u32 new_itr = q_vector->itr;
2348         u8 current_itr;
2349
2350         ixgbe_update_itr(q_vector, &q_vector->tx);
2351         ixgbe_update_itr(q_vector, &q_vector->rx);
2352
2353         current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2354
2355         switch (current_itr) {
2356         /* counts and packets in update_itr are dependent on these numbers */
2357         case lowest_latency:
2358                 new_itr = IXGBE_100K_ITR;
2359                 break;
2360         case low_latency:
2361                 new_itr = IXGBE_20K_ITR;
2362                 break;
2363         case bulk_latency:
2364                 new_itr = IXGBE_12K_ITR;
2365                 break;
2366         default:
2367                 break;
2368         }
2369
2370         if (new_itr != q_vector->itr) {
2371                 /* do an exponential smoothing */
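                /* Worked example, assuming IXGBE_100K_ITR == 40 and
                 * IXGBE_20K_ITR == 200 (values from ixgbe.h): stepping
                 * from 40 toward 200 gives (10 * 200 * 40) /
                 * ((9 * 200) + 40) == 43, so the ITR creeps toward the
                 * new target rather than jumping straight to it.
                 */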
2372                 new_itr = (10 * new_itr * q_vector->itr) /
2373                           ((9 * new_itr) + q_vector->itr);
2374
2375                 /* save the algorithm value here */
2376                 q_vector->itr = new_itr;
2377
2378                 ixgbe_write_eitr(q_vector);
2379         }
2380 }
2381
2382 /**
2383  * ixgbe_check_overtemp_subtask - check for over temperature
2384  * @adapter: pointer to adapter
2385  **/
2386 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2387 {
2388         struct ixgbe_hw *hw = &adapter->hw;
2389         u32 eicr = adapter->interrupt_event;
2390
2391         if (test_bit(__IXGBE_DOWN, &adapter->state))
2392                 return;
2393
2394         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2395             !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2396                 return;
2397
2398         adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2399
2400         switch (hw->device_id) {
2401         case IXGBE_DEV_ID_82599_T3_LOM:
2402                 /*
2403                  * The warning interrupt is shared by both ports, so
2404                  * this interrupt may not have been meant for our
2405                  * port, and we may also have missed the interrupt
2406                  * entirely; always check whether we also got an
2407                  * LSC
2408                  */
2409                 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2410                     !(eicr & IXGBE_EICR_LSC))
2411                         return;
2412
2413                 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2414                         u32 speed;
2415                         bool link_up = false;
2416
2417                         hw->mac.ops.check_link(hw, &speed, &link_up, false);
2418
2419                         if (link_up)
2420                                 return;
2421                 }
2422
2423                 /* Check if this is not due to overtemp */
2424                 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2425                         return;
2426
2427                 break;
2428         default:
2429                 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2430                         return;
2431                 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2432                         return;
2433                 break;
2434         }
2435         e_crit(drv, "%s\n", ixgbe_overheat_msg);
2436
2437         adapter->interrupt_event = 0;
2438 }
2439
2440 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2441 {
2442         struct ixgbe_hw *hw = &adapter->hw;
2443
2444         if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2445             (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2446                 e_crit(probe, "Fan has stopped, replace the adapter\n");
2447                 /* write to clear the interrupt */
2448                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2449         }
2450 }
2451
2452 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2453 {
2454         struct ixgbe_hw *hw = &adapter->hw;
2455
2456         if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2457                 return;
2458
2459         switch (adapter->hw.mac.type) {
2460         case ixgbe_mac_82599EB:
2461                 /*
2462                  * Need to check link state, so complete the overtemp
2463                  * check from the service task
2464                  */
2465                 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2466                      (eicr & IXGBE_EICR_LSC)) &&
2467                     (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2468                         adapter->interrupt_event = eicr;
2469                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2470                         ixgbe_service_event_schedule(adapter);
2471                         return;
2472                 }
2473                 return;
2474         case ixgbe_mac_X540:
2475                 if (!(eicr & IXGBE_EICR_TS))
2476                         return;
2477                 break;
2478         default:
2479                 return;
2480         }
2481
2482         e_crit(drv, "%s\n", ixgbe_overheat_msg);
2483 }
2484
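/* True when the port's media is a pluggable fiber/SFP module and so
 * needs the SFP detection handling in ixgbe_check_sfp_event().
 */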
2485 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2486 {
2487         switch (hw->mac.type) {
2488         case ixgbe_mac_82598EB:
2489                 if (hw->phy.type == ixgbe_phy_nl)
2490                         return true;
2491                 return false;
2492         case ixgbe_mac_82599EB:
2493         case ixgbe_mac_X550EM_x:
2494                 switch (hw->mac.ops.get_media_type(hw)) {
2495                 case ixgbe_media_type_fiber:
2496                 case ixgbe_media_type_fiber_qsfp:
2497                         return true;
2498                 default:
2499                         return false;
2500                 }
2501         default:
2502                 return false;
2503         }
2504 }
2505
2506 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2507 {
2508         struct ixgbe_hw *hw = &adapter->hw;
2509         u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2510
2511         if (!ixgbe_is_sfp(hw))
2512                 return;
2513
2514         /* Later MACs use a different SDP */
2515         if (hw->mac.type >= ixgbe_mac_X540)
2516                 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2517
2518         if (eicr & eicr_mask) {
2519                 /* Clear the interrupt */
2520                 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2521                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2522                         adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2523                         adapter->sfp_poll_time = 0;
2524                         ixgbe_service_event_schedule(adapter);
2525                 }
2526         }
2527
2528         if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2529             (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2530                 /* Clear the interrupt */
2531                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2532                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2533                         adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2534                         ixgbe_service_event_schedule(adapter);
2535                 }
2536         }
2537 }
2538
2539 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2540 {
2541         struct ixgbe_hw *hw = &adapter->hw;
2542
2543         adapter->lsc_int++;
2544         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2545         adapter->link_check_timeout = jiffies;
2546         if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2547                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2548                 IXGBE_WRITE_FLUSH(hw);
2549                 ixgbe_service_event_schedule(adapter);
2550         }
2551 }
2552
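/* Unmask interrupts for the queue vectors selected by qmask; the
 * 82598 has a single EIMS register while later MACs split the 64-bit
 * queue mask across EIMS_EX(0) and EIMS_EX(1).
 */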
2553 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2554                                            u64 qmask)
2555 {
2556         u32 mask;
2557         struct ixgbe_hw *hw = &adapter->hw;
2558
2559         switch (hw->mac.type) {
2560         case ixgbe_mac_82598EB:
2561                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2562                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2563                 break;
2564         case ixgbe_mac_82599EB:
2565         case ixgbe_mac_X540:
2566         case ixgbe_mac_X550:
2567         case ixgbe_mac_X550EM_x:
2568                 mask = (qmask & 0xFFFFFFFF);
2569                 if (mask)
2570                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2571                 mask = (qmask >> 32);
2572                 if (mask)
2573                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2574                 break;
2575         default:
2576                 break;
2577         }
2578         /* skip the flush */
2579 }
2580
2581 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2582                                             u64 qmask)
2583 {
2584         u32 mask;
2585         struct ixgbe_hw *hw = &adapter->hw;
2586
2587         switch (hw->mac.type) {
2588         case ixgbe_mac_82598EB:
2589                 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2590                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2591                 break;
2592         case ixgbe_mac_82599EB:
2593         case ixgbe_mac_X540:
2594         case ixgbe_mac_X550:
2595         case ixgbe_mac_X550EM_x:
2596                 mask = (qmask & 0xFFFFFFFF);
2597                 if (mask)
2598                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2599                 mask = (qmask >> 32);
2600                 if (mask)
2601                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2602                 break;
2603         default:
2604                 break;
2605         }
2606         /* skip the flush */
2607 }
2608
2609 /**
2610  * ixgbe_irq_enable - Enable default interrupt generation settings
2611  * @adapter: board private structure
2612  **/
2613 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2614                                     bool flush)
2615 {
2616         struct ixgbe_hw *hw = &adapter->hw;
2617         u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2618
2619         /* don't reenable LSC while waiting for link */
2620         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2621                 mask &= ~IXGBE_EIMS_LSC;
2622
2623         if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2624                 switch (adapter->hw.mac.type) {
2625                 case ixgbe_mac_82599EB:
2626                         mask |= IXGBE_EIMS_GPI_SDP0(hw);
2627                         break;
2628                 case ixgbe_mac_X540:
2629                 case ixgbe_mac_X550:
2630                 case ixgbe_mac_X550EM_x:
2631                         mask |= IXGBE_EIMS_TS;
2632                         break;
2633                 default:
2634                         break;
2635                 }
2636         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2637                 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2638         switch (adapter->hw.mac.type) {
2639         case ixgbe_mac_82599EB:
2640                 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2641                 mask |= IXGBE_EIMS_GPI_SDP2(hw);
2642                 /* fall through */
2643         case ixgbe_mac_X540:
2644         case ixgbe_mac_X550:
2645         case ixgbe_mac_X550EM_x:
2646                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2647                         mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2648                 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2649                         mask |= IXGBE_EICR_GPI_SDP0_X540;
2650                 mask |= IXGBE_EIMS_ECC;
2651                 mask |= IXGBE_EIMS_MAILBOX;
2652                 break;
2653         default:
2654                 break;
2655         }
2656
2657         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2658             !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2659                 mask |= IXGBE_EIMS_FLOW_DIR;
2660
2661         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2662         if (queues)
2663                 ixgbe_irq_enable_queues(adapter, ~0);
2664         if (flush)
2665                 IXGBE_WRITE_FLUSH(&adapter->hw);
2666 }
2667
2668 static irqreturn_t ixgbe_msix_other(int irq, void *data)
2669 {
2670         struct ixgbe_adapter *adapter = data;
2671         struct ixgbe_hw *hw = &adapter->hw;
2672         u32 eicr;
2673
2674         /*
2675          * Workaround for Silicon errata.  Use clear-by-write instead
2676          * of clear-by-read.  Reading with EICS will return the
2677          * interrupt causes without clearing them; the clearing is
2678          * done later with the write to EICR.
2679          */
2680         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2681
2682         /* The lower 16 bits of the EICR register are for the queue interrupts,
2683          * which should be masked here so that we do not accidentally clear them if
2684          * the bits are high when ixgbe_msix_other is called. There is a race
2685          * condition otherwise which results in possible performance loss
2686          * especially if the ixgbe_msix_other interrupt is triggering
2687          * consistently (as it would when PPS is turned on for the X540 device)
2688          */
2689         eicr &= 0xFFFF0000;
2690
2691         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2692
2693         if (eicr & IXGBE_EICR_LSC)
2694                 ixgbe_check_lsc(adapter);
2695
2696         if (eicr & IXGBE_EICR_MAILBOX)
2697                 ixgbe_msg_task(adapter);
2698
2699         switch (hw->mac.type) {
2700         case ixgbe_mac_82599EB:
2701         case ixgbe_mac_X540:
2702         case ixgbe_mac_X550:
2703         case ixgbe_mac_X550EM_x:
2704                 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2705                     (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2706                         adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2707                         ixgbe_service_event_schedule(adapter);
2708                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2709                                         IXGBE_EICR_GPI_SDP0_X540);
2710                 }
2711                 if (eicr & IXGBE_EICR_ECC) {
2712                         e_info(link, "Received ECC Err, initiating reset\n");
2713                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2714                         ixgbe_service_event_schedule(adapter);
2715                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2716                 }
2717                 /* Handle Flow Director Full threshold interrupt */
2718                 if (eicr & IXGBE_EICR_FLOW_DIR) {
2719                         int reinit_count = 0;
2720                         int i;
2721                         for (i = 0; i < adapter->num_tx_queues; i++) {
2722                                 struct ixgbe_ring *ring = adapter->tx_ring[i];
2723                                 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2724                                                        &ring->state))
2725                                         reinit_count++;
2726                         }
2727                         if (reinit_count) {
2728                                 /* no more flow director interrupts until after init */
2729                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2730                                 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2731                                 ixgbe_service_event_schedule(adapter);
2732                         }
2733                 }
2734                 ixgbe_check_sfp_event(adapter, eicr);
2735                 ixgbe_check_overtemp_event(adapter, eicr);
2736                 break;
2737         default:
2738                 break;
2739         }
2740
2741         ixgbe_check_fan_failure(adapter, eicr);
2742
2743         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2744                 ixgbe_ptp_check_pps_event(adapter);
2745
2746         /* re-enable the original interrupt state, no lsc, no queues */
2747         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2748                 ixgbe_irq_enable(adapter, false, false);
2749
2750         return IRQ_HANDLED;
2751 }
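
/*
 * Editor's note -- an illustrative sketch, not part of the driver: in
 * MSI-X mode EICR[15:0] carries the first 16 queue causes, and writing
 * a 1 to any of them acks it, so an unmasked write-back from this
 * handler could ack a queue interrupt the queue handler never saw.
 * The helper below just restates the eicr &= 0xFFFF0000 step above.
 */
#if 0	/* example only */
static u32 example_other_causes(u32 eicr)
{
	return eicr & 0xFFFF0000;	/* keep LSC, mailbox, ECC, GPI, ... */
}
#endif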
2752
2753 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2754 {
2755         struct ixgbe_q_vector *q_vector = data;
2756
2757         /* EIAM disabled interrupts (on this vector) for us */
2758
2759         if (q_vector->rx.ring || q_vector->tx.ring)
2760                 napi_schedule_irqoff(&q_vector->napi);
2761
2762         return IRQ_HANDLED;
2763 }
2764
2765 /**
2766  * ixgbe_poll - NAPI Rx polling callback
2767  * @napi: structure for representing this polling device
2768  * @budget: how many packets the driver is allowed to clean
2769  *
2770  * This function is the NAPI polling callback used in legacy, MSI and MSI-X modes
2771  **/
2772 int ixgbe_poll(struct napi_struct *napi, int budget)
2773 {
2774         struct ixgbe_q_vector *q_vector =
2775                                 container_of(napi, struct ixgbe_q_vector, napi);
2776         struct ixgbe_adapter *adapter = q_vector->adapter;
2777         struct ixgbe_ring *ring;
2778         int per_ring_budget, work_done = 0;
2779         bool clean_complete = true;
2780
2781 #ifdef CONFIG_IXGBE_DCA
2782         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2783                 ixgbe_update_dca(q_vector);
2784 #endif
2785
2786         ixgbe_for_each_ring(ring, q_vector->tx)
2787                 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2788
2789         /* Exit if we are called by netpoll or busy polling is active */
2790         if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
2791                 return budget;
2792
2793         /* attempt to distribute budget to each queue fairly, but don't allow
2794          * the budget to go below 1 because we'll exit polling */
2795         if (q_vector->rx.count > 1)
2796                 per_ring_budget = max(budget/q_vector->rx.count, 1);
2797         else
2798                 per_ring_budget = budget;
2799
2800         ixgbe_for_each_ring(ring, q_vector->rx) {
2801                 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
2802                                                  per_ring_budget);
2803
2804                 work_done += cleaned;
2805                 clean_complete &= (cleaned < per_ring_budget);
2806         }
2807
2808         ixgbe_qv_unlock_napi(q_vector);
2809         /* If all work not completed, return budget and keep polling */
2810         if (!clean_complete)
2811                 return budget;
2812
2813         /* all work done, exit the polling mode */
2814         napi_complete_done(napi, work_done);
2815         if (adapter->rx_itr_setting & 1)
2816                 ixgbe_set_itr(q_vector);
2817         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2818                 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2819
2820         return 0;
2821 }
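
/*
 * Editor's sketch (assumed numbers, illustrative only): with the
 * default NAPI budget of 64 and three Rx rings on one q_vector,
 * per_ring_budget = max(64 / 3, 1) = 21.  A ring that cleans a full
 * 21 descriptors leaves clean_complete false, so ixgbe_poll returns
 * the budget and NAPI keeps polling instead of re-enabling the IRQ.
 */
#if 0	/* example only */
static int example_per_ring_budget(int budget, u16 rx_count)
{
	return rx_count > 1 ? max(budget / rx_count, 1) : budget;
}
#endif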
2822
2823 /**
2824  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2825  * @adapter: board private structure
2826  *
2827  * ixgbe_request_msix_irqs requests MSI-X interrupts from the kernel
2828  * for the vectors that were previously allocated.
2829  **/
2830 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2831 {
2832         struct net_device *netdev = adapter->netdev;
2833         int vector, err;
2834         int ri = 0, ti = 0;
2835
2836         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2837                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2838                 struct msix_entry *entry = &adapter->msix_entries[vector];
2839
2840                 if (q_vector->tx.ring && q_vector->rx.ring) {
2841                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2842                                  "%s-%s-%d", netdev->name, "TxRx", ri++);
2843                         ti++;
2844                 } else if (q_vector->rx.ring) {
2845                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2846                                  "%s-%s-%d", netdev->name, "rx", ri++);
2847                 } else if (q_vector->tx.ring) {
2848                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2849                                  "%s-%s-%d", netdev->name, "tx", ti++);
2850                 } else {
2851                         /* skip this unused q_vector */
2852                         continue;
2853                 }
2854                 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2855                                   q_vector->name, q_vector);
2856                 if (err) {
2857                         e_err(probe, "request_irq failed for MSIX interrupt, "
2858                               "Error: %d\n", err);
2859                         goto free_queue_irqs;
2860                 }
2861                 /* If Flow Director is enabled, set interrupt affinity */
2862                 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2863                         /* assign the mask for this irq */
2864                         irq_set_affinity_hint(entry->vector,
2865                                               &q_vector->affinity_mask);
2866                 }
2867         }
2868
2869         err = request_irq(adapter->msix_entries[vector].vector,
2870                           ixgbe_msix_other, 0, netdev->name, adapter);
2871         if (err) {
2872                 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2873                 goto free_queue_irqs;
2874         }
2875
2876         return 0;
2877
2878 free_queue_irqs:
2879         while (vector) {
2880                 vector--;
2881                 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2882                                       NULL);
2883                 free_irq(adapter->msix_entries[vector].vector,
2884                          adapter->q_vector[vector]);
2885         }
2886         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2887         pci_disable_msix(adapter->pdev);
2888         kfree(adapter->msix_entries);
2889         adapter->msix_entries = NULL;
2890         return err;
2891 }
2892
2893 /**
2894  * ixgbe_intr - legacy mode Interrupt Handler
2895  * @irq: interrupt number
2896  * @data: pointer to a network interface device structure
2897  **/
2898 static irqreturn_t ixgbe_intr(int irq, void *data)
2899 {
2900         struct ixgbe_adapter *adapter = data;
2901         struct ixgbe_hw *hw = &adapter->hw;
2902         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2903         u32 eicr;
2904
2905         /*
2906          * Workaround for silicon errata #26 on 82598.  Mask the interrupt
2907          * before the read of EICR.
2908          */
2909         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2910
2911         /* For NAPI, EIAM auto-masks the tx/rx interrupt bits on read,
2912          * so no explicit interrupt disable is necessary */
2913         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2914         if (!eicr) {
2915                 /*
2916                  * shared interrupt alert!
2917                  * make sure interrupts are re-enabled, because the read will
2918                  * have disabled them due to EIAM.  This also finishes the
2919                  * workaround for the 82598 silicon errata: unmask the
2920                  * interrupt that we masked before the EICR read.
2921                  */
2922                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2923                         ixgbe_irq_enable(adapter, true, true);
2924                 return IRQ_NONE;        /* Not our interrupt */
2925         }
2926
2927         if (eicr & IXGBE_EICR_LSC)
2928                 ixgbe_check_lsc(adapter);
2929
2930         switch (hw->mac.type) {
2931         case ixgbe_mac_82599EB:
2932                 ixgbe_check_sfp_event(adapter, eicr);
2933                 /* Fall through */
2934         case ixgbe_mac_X540:
2935         case ixgbe_mac_X550:
2936         case ixgbe_mac_X550EM_x:
2937                 if (eicr & IXGBE_EICR_ECC) {
2938                         e_info(link, "Received ECC Err, initiating reset\n");
2939                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2940                         ixgbe_service_event_schedule(adapter);
2941                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2942                 }
2943                 ixgbe_check_overtemp_event(adapter, eicr);
2944                 break;
2945         default:
2946                 break;
2947         }
2948
2949         ixgbe_check_fan_failure(adapter, eicr);
2950         if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2951                 ixgbe_ptp_check_pps_event(adapter);
2952
2953         /* would disable interrupts here but EIAM disabled it */
2954         napi_schedule_irqoff(&q_vector->napi);
2955
2956         /*
2957          * re-enable link (maybe) and non-queue interrupts, no flush.
2958          * ixgbe_poll will re-enable the queue interrupts
2959          */
2960         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2961                 ixgbe_irq_enable(adapter, false, false);
2962
2963         return IRQ_HANDLED;
2964 }
2965
2966 /**
2967  * ixgbe_request_irq - initialize interrupts
2968  * @adapter: board private structure
2969  *
2970  * Attempts to configure interrupts using the best available
2971  * capabilities of the hardware and kernel.
2972  **/
2973 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2974 {
2975         struct net_device *netdev = adapter->netdev;
2976         int err;
2977
2978         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2979                 err = ixgbe_request_msix_irqs(adapter);
2980         else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2981                 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2982                                   netdev->name, adapter);
2983         else
2984                 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2985                                   netdev->name, adapter);
2986
2987         if (err)
2988                 e_err(probe, "request_irq failed, Error %d\n", err);
2989
2990         return err;
2991 }
2992
2993 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2994 {
2995         int vector;
2996
2997         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2998                 free_irq(adapter->pdev->irq, adapter);
2999                 return;
3000         }
3001
3002         for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3003                 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3004                 struct msix_entry *entry = &adapter->msix_entries[vector];
3005
3006                 /* free only the irqs that were actually requested */
3007                 if (!q_vector->rx.ring && !q_vector->tx.ring)
3008                         continue;
3009
3010                 /* clear the affinity_mask in the IRQ descriptor */
3011                 irq_set_affinity_hint(entry->vector, NULL);
3012
3013                 free_irq(entry->vector, q_vector);
3014         }
3015
3016         free_irq(adapter->msix_entries[vector++].vector, adapter);
3017 }
3018
3019 /**
3020  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3021  * @adapter: board private structure
3022  **/
3023 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3024 {
3025         switch (adapter->hw.mac.type) {
3026         case ixgbe_mac_82598EB:
3027                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3028                 break;
3029         case ixgbe_mac_82599EB:
3030         case ixgbe_mac_X540:
3031         case ixgbe_mac_X550:
3032         case ixgbe_mac_X550EM_x:
3033                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3034                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3035                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3036                 break;
3037         default:
3038                 break;
3039         }
3040         IXGBE_WRITE_FLUSH(&adapter->hw);
3041         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3042                 int vector;
3043
3044                 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3045                         synchronize_irq(adapter->msix_entries[vector].vector);
3046
3047                 synchronize_irq(adapter->msix_entries[vector++].vector);
3048         } else {
3049                 synchronize_irq(adapter->pdev->irq);
3050         }
3051 }
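
/*
 * Editor's note (sketch, not driver code): 82599 and later track up to
 * 64 queue vectors, which is why the disable path above writes
 * 0xFFFF0000 to EIMC for the "other" causes and then ~0 to both
 * EIMC_EX words.  A vector index maps onto those extended words as
 * shown below.
 */
#if 0	/* example only */
static void example_vector_to_eims_ex(u8 v_idx, u32 *word, u32 *bit)
{
	*word = v_idx / 32;	/* 0 -> EIMS_EX(0), 1 -> EIMS_EX(1) */
	*bit = v_idx % 32;
}
#endif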
3052
3053 /**
3054  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3055  * @adapter: board private structure
3056  **/
3057 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3058 {
3059         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3060
3061         ixgbe_write_eitr(q_vector);
3062
3063         ixgbe_set_ivar(adapter, 0, 0, 0);
3064         ixgbe_set_ivar(adapter, 1, 0, 0);
3065
3066         e_info(hw, "Legacy interrupt IVAR setup done\n");
3067 }
3068
3069 /**
3070  * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3071  * @adapter: board private structure
3072  * @ring: structure containing ring specific data
3073  *
3074  * Configure the Tx descriptor ring after a reset.
3075  **/
3076 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3077                              struct ixgbe_ring *ring)
3078 {
3079         struct ixgbe_hw *hw = &adapter->hw;
3080         u64 tdba = ring->dma;
3081         int wait_loop = 10;
3082         u32 txdctl = IXGBE_TXDCTL_ENABLE;
3083         u8 reg_idx = ring->reg_idx;
3084
3085         /* disable queue to avoid issues while updating state */
3086         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3087         IXGBE_WRITE_FLUSH(hw);
3088
3089         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3090                         (tdba & DMA_BIT_MASK(32)));
3091         IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3092         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3093                         ring->count * sizeof(union ixgbe_adv_tx_desc));
3094         IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3095         IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3096         ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3097
3098         /*
3099          * Set WTHRESH to encourage burst writeback; it should not be set
3100          * higher than 1 when:
3101          * - ITR is 0 as it could cause false TX hangs
3102          * - ITR is set to > 100k int/sec and BQL is enabled
3103          *
3104          * To avoid issues, WTHRESH + PTHRESH should always be equal
3105          * to or less than the number of on-chip descriptors, which is
3106          * currently 40.
3107          */
3108         if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3109                 txdctl |= (1 << 16);    /* WTHRESH = 1 */
3110         else
3111                 txdctl |= (8 << 16);    /* WTHRESH = 8 */
3112
3113         /*
3114          * Setting PTHRESH to 32 both improves performance
3115          * and avoids a TX hang with DFP enabled
3116          */
3117         txdctl |= (1 << 8) |    /* HTHRESH = 1 */
3118                    32;          /* PTHRESH = 32 */
3119
3120         /* reinitialize flow director state */
3121         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3122                 ring->atr_sample_rate = adapter->atr_sample_rate;
3123                 ring->atr_count = 0;
3124                 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3125         } else {
3126                 ring->atr_sample_rate = 0;
3127         }
3128
3129         /* initialize XPS */
3130         if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3131                 struct ixgbe_q_vector *q_vector = ring->q_vector;
3132
3133                 if (q_vector)
3134                         netif_set_xps_queue(ring->netdev,
3135                                             &q_vector->affinity_mask,
3136                                             ring->queue_index);
3137         }
3138
3139         clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3140
3141         /* enable queue */
3142         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3143
3144         /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3145         if (hw->mac.type == ixgbe_mac_82598EB &&
3146             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3147                 return;
3148
3149         /* poll to verify queue is enabled */
3150         do {
3151                 usleep_range(1000, 2000);
3152                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3153         } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3154         if (!wait_loop)
3155                 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3156 }
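
/*
 * Editor's sketch (illustrative only): TXDCTL packs PTHRESH into bits
 * 6:0, HTHRESH into bits 14:8 and WTHRESH into bits 22:16, so the
 * low-ITR value built above is ENABLE | (1 << 16) | (1 << 8) | 32.
 */
#if 0	/* example only */
static u32 example_txdctl(u8 pthresh, u8 hthresh, u8 wthresh)
{
	return IXGBE_TXDCTL_ENABLE |
	       ((u32)wthresh << 16) |
	       ((u32)hthresh << 8) |
	       pthresh;
}
#endif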
3157
3158 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3159 {
3160         struct ixgbe_hw *hw = &adapter->hw;
3161         u32 rttdcs, mtqc;
3162         u8 tcs = netdev_get_num_tc(adapter->netdev);
3163
3164         if (hw->mac.type == ixgbe_mac_82598EB)
3165                 return;
3166
3167         /* disable the arbiter while setting MTQC */
3168         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3169         rttdcs |= IXGBE_RTTDCS_ARBDIS;
3170         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3171
3172         /* set transmit pool layout */
3173         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3174                 mtqc = IXGBE_MTQC_VT_ENA;
3175                 if (tcs > 4)
3176                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3177                 else if (tcs > 1)
3178                         mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3179                 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3180                         mtqc |= IXGBE_MTQC_32VF;
3181                 else
3182                         mtqc |= IXGBE_MTQC_64VF;
3183         } else {
3184                 if (tcs > 4)
3185                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3186                 else if (tcs > 1)
3187                         mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3188                 else
3189                         mtqc = IXGBE_MTQC_64Q_1PB;
3190         }
3191
3192         IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3193
3194         /* Enable Security TX Buffer IFG for multiple packet buffers */
3195         if (tcs) {
3196                 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3197                 sectx |= IXGBE_SECTX_DCB;
3198                 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3199         }
3200
3201         /* re-enable the arbiter */
3202         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3203         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3204 }
3205
3206 /**
3207  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3208  * @adapter: board private structure
3209  *
3210  * Configure the Tx unit of the MAC after a reset.
3211  **/
3212 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3213 {
3214         struct ixgbe_hw *hw = &adapter->hw;
3215         u32 dmatxctl;
3216         u32 i;
3217
3218         ixgbe_setup_mtqc(adapter);
3219
3220         if (hw->mac.type != ixgbe_mac_82598EB) {
3221                 /* DMATXCTL.EN must be before Tx queues are enabled */
3222                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3223                 dmatxctl |= IXGBE_DMATXCTL_TE;
3224                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3225         }
3226
3227         /* Setup the HW Tx Head and Tail descriptor pointers */
3228         for (i = 0; i < adapter->num_tx_queues; i++)
3229                 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3230 }
3231
3232 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3233                                  struct ixgbe_ring *ring)
3234 {
3235         struct ixgbe_hw *hw = &adapter->hw;
3236         u8 reg_idx = ring->reg_idx;
3237         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3238
3239         srrctl |= IXGBE_SRRCTL_DROP_EN;
3240
3241         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3242 }
3243
3244 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3245                                   struct ixgbe_ring *ring)
3246 {
3247         struct ixgbe_hw *hw = &adapter->hw;
3248         u8 reg_idx = ring->reg_idx;
3249         u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3250
3251         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3252
3253         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3254 }
3255
3256 #ifdef CONFIG_IXGBE_DCB
3257 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3258 #else
3259 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3260 #endif
3261 {
3262         int i;
3263         bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3264
3265         if (adapter->ixgbe_ieee_pfc)
3266                 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3267
3268         /*
3269          * We should set the drop enable bit if:
3270          *  SR-IOV is enabled
3271          *   or
3272          *  Number of Rx queues > 1 and flow control is disabled
3273          *
3274          *  This allows us to avoid head of line blocking for security
3275          *  and performance reasons.
3276          */
3277         if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3278             !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3279                 for (i = 0; i < adapter->num_rx_queues; i++)
3280                         ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3281         } else {
3282                 for (i = 0; i < adapter->num_rx_queues; i++)
3283                         ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3284         }
3285 }
3286
3287 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3288
3289 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3290                                    struct ixgbe_ring *rx_ring)
3291 {
3292         struct ixgbe_hw *hw = &adapter->hw;
3293         u32 srrctl;
3294         u8 reg_idx = rx_ring->reg_idx;
3295
3296         if (hw->mac.type == ixgbe_mac_82598EB) {
3297                 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3298
3299                 /*
3300                  * if VMDq is not active we must program one srrctl register
3301                  * per RSS queue since we have enabled RDRXCTL.MVMEN
3302                  */
3303                 reg_idx &= mask;
3304         }
3305
3306         /* configure header buffer length, needed for RSC */
3307         srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3308
3309         /* configure the packet buffer length */
3310         srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3311
3312         /* configure descriptor type */
3313         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3314
3315         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3316 }
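
/*
 * Editor's sketch (assuming the common 2K receive buffer): the value
 * written above works out to (256 << 2) for a 256-byte header split
 * size in 64-byte units, (2048 >> 10) = 2 for a 2KB packet buffer in
 * 1KB units, plus the advanced one-buffer descriptor type.
 */
#if 0	/* example only */
	u32 example_srrctl = (256 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) |
			     (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
			     IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
#endif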
3317
3318 /**
3319  * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
3320  * @adapter: device handle
3321  *
3322  *  - 82598/82599/X540:     128
3323  *  - X550(non-SRIOV mode): 512
3324  *  - X550(SRIOV mode):     64
3325  */
3326 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3327 {
3328         if (adapter->hw.mac.type < ixgbe_mac_X550)
3329                 return 128;
3330         else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3331                 return 64;
3332         else
3333                 return 512;
3334 }
3335
3336 /**
3337  * ixgbe_store_reta - Write the RETA table to HW
3338  * @adapter: device handle
3339  *
3340  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3341  */
3342 void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3343 {
3344         u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3345         struct ixgbe_hw *hw = &adapter->hw;
3346         u32 reta = 0;
3347         u32 indices_multi;
3348         u8 *indir_tbl = adapter->rss_indir_tbl;
3349
3350         /* Fill out the redirection table as follows:
3351          *  - 82598:      8-bit wide entries containing a pair of 4-bit RSS
3352          *    indices.
3353          *  - 82599/X540: 8-bit wide entries containing a 4-bit RSS index
3354          *  - X550:       8-bit wide entries containing a 6-bit RSS index
3355          */
3356         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3357                 indices_multi = 0x11;
3358         else
3359                 indices_multi = 0x1;
3360
3361         /* Write redirection table to HW */
3362         for (i = 0; i < reta_entries; i++) {
3363                 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3364                 if ((i & 3) == 3) {
3365                         if (i < 128)
3366                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3367                         else
3368                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3369                                                 reta);
3370                         reta = 0;
3371                 }
3372         }
3373 }
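
/*
 * Editor's sketch (worked example, not driver code): four 8-bit
 * entries share each 32-bit RETA register.  With indir_tbl[0..3] =
 * {1, 2, 3, 0} and indices_multi = 0x1 the first register becomes
 * 0x00030201; on 82598 the 0x11 multiplier copies each 4-bit index
 * into both nibbles of its byte.
 */
#if 0	/* example only */
static u32 example_reta_word(const u8 *tbl, u32 first, u32 multi)
{
	u32 i, reta = 0;

	for (i = 0; i < 4; i++)
		reta |= multi * tbl[first + i] << (i * 8);
	return reta;
}
#endif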
3374
3375 /**
3376  * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
3377  * @adapter: device handle
3378  *
3379  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3380  */
3381 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3382 {
3383         u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3384         struct ixgbe_hw *hw = &adapter->hw;
3385         u32 vfreta = 0;
3386         unsigned int pf_pool = adapter->num_vfs;
3387
3388         /* Write redirection table to HW */
3389         for (i = 0; i < reta_entries; i++) {
3390                 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3391                 if ((i & 3) == 3) {
3392                         IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3393                                         vfreta);
3394                         vfreta = 0;
3395                 }
3396         }
3397 }
3398
3399 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3400 {
3401         struct ixgbe_hw *hw = &adapter->hw;
3402         u32 i, j;
3403         u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3404         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3405
3406         /* Program table for at least 2 queues w/ SR-IOV so that VFs can
3407          * make full use of any rings they may have.  We will use the
3408          * PSRTYPE register to control how many rings we use within the PF.
3409          */
3410         if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3411                 rss_i = 2;
3412
3413         /* Fill out hash function seeds */
3414         for (i = 0; i < 10; i++)
3415                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3416
3417         /* Fill out redirection table */
3418         memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3419
3420         for (i = 0, j = 0; i < reta_entries; i++, j++) {
3421                 if (j == rss_i)
3422                         j = 0;
3423
3424                 adapter->rss_indir_tbl[i] = j;
3425         }
3426
3427         ixgbe_store_reta(adapter);
3428 }
3429
3430 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3431 {
3432         struct ixgbe_hw *hw = &adapter->hw;
3433         u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3434         unsigned int pf_pool = adapter->num_vfs;
3435         int i, j;
3436
3437         /* Fill out hash function seeds */
3438         for (i = 0; i < 10; i++)
3439                 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3440                                 adapter->rss_key[i]);
3441
3442         /* Fill out the redirection table */
3443         for (i = 0, j = 0; i < 64; i++, j++) {
3444                 if (j == rss_i)
3445                         j = 0;
3446
3447                 adapter->rss_indir_tbl[i] = j;
3448         }
3449
3450         ixgbe_store_vfreta(adapter);
3451 }
3452
3453 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3454 {
3455         struct ixgbe_hw *hw = &adapter->hw;
3456         u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3457         u32 rxcsum;
3458
3459         /* Disable indicating checksum in descriptor, enables RSS hash */
3460         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3461         rxcsum |= IXGBE_RXCSUM_PCSD;
3462         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3463
3464         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3465                 if (adapter->ring_feature[RING_F_RSS].mask)
3466                         mrqc = IXGBE_MRQC_RSSEN;
3467         } else {
3468                 u8 tcs = netdev_get_num_tc(adapter->netdev);
3469
3470                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3471                         if (tcs > 4)
3472                                 mrqc = IXGBE_MRQC_VMDQRT8TCEN;  /* 8 TCs */
3473                         else if (tcs > 1)
3474                                 mrqc = IXGBE_MRQC_VMDQRT4TCEN;  /* 4 TCs */
3475                         else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3476                                 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3477                         else
3478                                 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3479                 } else {
3480                         if (tcs > 4)
3481                                 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3482                         else if (tcs > 1)
3483                                 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3484                         else
3485                                 mrqc = IXGBE_MRQC_RSSEN;
3486                 }
3487         }
3488
3489         /* Perform hash on these packet types */
3490         rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3491                      IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3492                      IXGBE_MRQC_RSS_FIELD_IPV6 |
3493                      IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3494
3495         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3496                 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3497         if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3498                 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3499
3500         netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
3501         if ((hw->mac.type >= ixgbe_mac_X550) &&
3502             (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3503                 unsigned int pf_pool = adapter->num_vfs;
3504
3505                 /* Enable VF RSS mode */
3506                 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3507                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3508
3509                 /* Setup RSS through the VF registers */
3510                 ixgbe_setup_vfreta(adapter);
3511                 vfmrqc = IXGBE_MRQC_RSSEN;
3512                 vfmrqc |= rss_field;
3513                 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3514         } else {
3515                 ixgbe_setup_reta(adapter);
3516                 mrqc |= rss_field;
3517                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3518         }
3519 }
3520
3521 /**
3522  * ixgbe_configure_rscctl - enable RSC for the indicated ring
3523  * @adapter:    address of board private structure
3524  * @ring:       structure containing ring specific data
3525  **/
3526 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3527                                    struct ixgbe_ring *ring)
3528 {
3529         struct ixgbe_hw *hw = &adapter->hw;
3530         u32 rscctrl;
3531         u8 reg_idx = ring->reg_idx;
3532
3533         if (!ring_is_rsc_enabled(ring))
3534                 return;
3535
3536         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3537         rscctrl |= IXGBE_RSCCTL_RSCEN;
3538         /*
3539          * we must limit the number of descriptors so that the
3540          * total size of max desc * buf_len is not greater
3541          * than 65536
3542          */
3543         rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3544         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3545 }
3546
3547 #define IXGBE_MAX_RX_DESC_POLL 10
3548 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3549                                        struct ixgbe_ring *ring)
3550 {
3551         struct ixgbe_hw *hw = &adapter->hw;
3552         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3553         u32 rxdctl;
3554         u8 reg_idx = ring->reg_idx;
3555
3556         if (ixgbe_removed(hw->hw_addr))
3557                 return;
3558         /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3559         if (hw->mac.type == ixgbe_mac_82598EB &&
3560             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3561                 return;
3562
3563         do {
3564                 usleep_range(1000, 2000);
3565                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3566         } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3567
3568         if (!wait_loop) {
3569                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3570                       "the polling period\n", reg_idx);
3571         }
3572 }
3573
3574 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3575                             struct ixgbe_ring *ring)
3576 {
3577         struct ixgbe_hw *hw = &adapter->hw;
3578         int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3579         u32 rxdctl;
3580         u8 reg_idx = ring->reg_idx;
3581
3582         if (ixgbe_removed(hw->hw_addr))
3583                 return;
3584         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3585         rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3586
3587         /* write value back with RXDCTL.ENABLE bit cleared */
3588         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3589
3590         if (hw->mac.type == ixgbe_mac_82598EB &&
3591             !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3592                 return;
3593
3594         /* the hardware may take up to 100us to really disable the rx queue */
3595         do {
3596                 udelay(10);
3597                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3598         } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3599
3600         if (!wait_loop) {
3601                 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3602                       "the polling period\n", reg_idx);
3603         }
3604 }
3605
3606 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3607                              struct ixgbe_ring *ring)
3608 {
3609         struct ixgbe_hw *hw = &adapter->hw;
3610         u64 rdba = ring->dma;
3611         u32 rxdctl;
3612         u8 reg_idx = ring->reg_idx;
3613
3614         /* disable queue to avoid issues while updating state */
3615         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3616         ixgbe_disable_rx_queue(adapter, ring);
3617
3618         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3619         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3620         IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3621                         ring->count * sizeof(union ixgbe_adv_rx_desc));
3622         IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3623         IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3624         ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3625
3626         ixgbe_configure_srrctl(adapter, ring);
3627         ixgbe_configure_rscctl(adapter, ring);
3628
3629         if (hw->mac.type == ixgbe_mac_82598EB) {
3630                 /*
3631                  * Enable cache-line-friendly hardware writes:
3632                  * PTHRESH=32 descriptors (half the internal cache), which
3633                  * also avoids ugly rx_no_buffer_count increments;
3634                  * HTHRESH=4 descriptors (to minimize latency on fetch);
3635                  * WTHRESH=8, burst writeback of up to two cache lines
3636                  */
3637                 rxdctl &= ~0x3FFFFF;
3638                 rxdctl |=  0x080420;
3639         }
3640
3641         /* enable receive descriptor ring */
3642         rxdctl |= IXGBE_RXDCTL_ENABLE;
3643         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3644
3645         ixgbe_rx_desc_queue_enable(adapter, ring);
3646         ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3647 }
3648
3649 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3650 {
3651         struct ixgbe_hw *hw = &adapter->hw;
3652         int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3653         u16 pool;
3654
3655         /* PSRTYPE must be initialized in non-82598 adapters */
3656         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3657                       IXGBE_PSRTYPE_UDPHDR |
3658                       IXGBE_PSRTYPE_IPV4HDR |
3659                       IXGBE_PSRTYPE_L2HDR |
3660                       IXGBE_PSRTYPE_IPV6HDR;
3661
3662         if (hw->mac.type == ixgbe_mac_82598EB)
3663                 return;
3664
3665         if (rss_i > 3)
3666                 psrtype |= 2 << 29;
3667         else if (rss_i > 1)
3668                 psrtype |= 1 << 29;
3669
3670         for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3671                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3672 }
3673
3674 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3675 {
3676         struct ixgbe_hw *hw = &adapter->hw;
3677         u32 reg_offset, vf_shift;
3678         u32 gcr_ext, vmdctl;
3679         int i;
3680
3681         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3682                 return;
3683
3684         vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3685         vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3686         vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3687         vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3688         vmdctl |= IXGBE_VT_CTL_REPLEN;
3689         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3690
3691         vf_shift = VMDQ_P(0) % 32;
3692         reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3693
3694         /* Enable only the PF's pool for Tx/Rx */
3695         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3696         IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3697         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3698         IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3699         if (adapter->bridge_mode == BRIDGE_MODE_VEB)
3700                 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3701
3702         /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3703         hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3704
3705         /* clear VLAN promisc flag so VFTA will be updated if necessary */
3706         adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
3707
3708         /*
3709          * Set up VF register offsets for selected VT Mode,
3710          * i.e. 16, 32 or 64 VFs for SR-IOV
3711          */
3712         switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3713         case IXGBE_82599_VMDQ_8Q_MASK:
3714                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3715                 break;
3716         case IXGBE_82599_VMDQ_4Q_MASK:
3717                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3718                 break;
3719         default:
3720                 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3721                 break;
3722         }
3723
3724         IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3725
3726
3727         /* Enable MAC Anti-Spoofing */
3728         hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3729                                           adapter->num_vfs);
3730
3731         /* Ensure LLDP and FC are set for Ethertype anti-spoofing if we will be
3732          * calling set_ethertype_anti_spoofing for each VF in the loop below
3733          */
3734         if (hw->mac.ops.set_ethertype_anti_spoofing) {
3735                 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3736                                 (IXGBE_ETQF_FILTER_EN    |
3737                                  IXGBE_ETQF_TX_ANTISPOOF |
3738                                  IXGBE_ETH_P_LLDP));
3739
3740                 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
3741                                 (IXGBE_ETQF_FILTER_EN |
3742                                  IXGBE_ETQF_TX_ANTISPOOF |
3743                                  ETH_P_PAUSE));
3744         }
3745
3746         /* For VFs that have spoof checking turned off */
3747         for (i = 0; i < adapter->num_vfs; i++) {
3748                 if (!adapter->vfinfo[i].spoofchk_enabled)
3749                         ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3750
3751                 /* enable ethertype anti spoofing if hw supports it */
3752                 if (hw->mac.ops.set_ethertype_anti_spoofing)
3753                         hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3754
3755                 /* Enable/Disable RSS query feature  */
3756                 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
3757                                           adapter->vfinfo[i].rss_query_enabled);
3758         }
3759 }
3760
3761 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3762 {
3763         struct ixgbe_hw *hw = &adapter->hw;
3764         struct net_device *netdev = adapter->netdev;
3765         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3766         struct ixgbe_ring *rx_ring;
3767         int i;
3768         u32 mhadd, hlreg0;
3769
3770 #ifdef IXGBE_FCOE
3771         /* adjust max frame to be able to do baby jumbo for FCoE */
3772         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3773             (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3774                 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3775
3776 #endif /* IXGBE_FCOE */
3777
3778         /* adjust max frame to be at least the size of a standard frame */
3779         if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3780                 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3781
3782         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3783         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3784                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3785                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3786
3787                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3788         }
3789
3790         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3791         /* set jumbo enable since MHADD.MFS keeps the size locked at max_frame */
3792         hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3793         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3794
3795         /*
3796          * Walk the Rx rings and set or clear the per-ring RSC flag
3797          * based on the global RSC enable
3798          */
3799         for (i = 0; i < adapter->num_rx_queues; i++) {
3800                 rx_ring = adapter->rx_ring[i];
3801                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3802                         set_ring_rsc_enabled(rx_ring);
3803                 else
3804                         clear_ring_rsc_enabled(rx_ring);
3805         }
3806 }
3807
3808 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3809 {
3810         struct ixgbe_hw *hw = &adapter->hw;
3811         u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3812
3813         switch (hw->mac.type) {
3814         case ixgbe_mac_82598EB:
3815                 /*
3816                  * For VMDq support of different descriptor types or
3817                  * buffer sizes through the use of multiple SRRCTL
3818                  * registers, RDRXCTL.MVMEN must be set to 1
3819                  *
3820                  * also, the manual doesn't mention it clearly but DCA hints
3821                  * will only use queue 0's tags unless this bit is set.  Side
3822                  * effects of setting this bit are only that SRRCTL must be
3823                  * fully programmed [0..15]
3824                  */
3825                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3826                 break;
3827         case ixgbe_mac_X550:
3828         case ixgbe_mac_X550EM_x:
3829                 if (adapter->num_vfs)
3830                         rdrxctl |= IXGBE_RDRXCTL_PSP;
3831                 /* fall through for older HW */
3832         case ixgbe_mac_82599EB:
3833         case ixgbe_mac_X540:
3834                 /* Disable RSC for ACK packets */
3835                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3836                    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3837                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3838                 /* hardware requires some bits to be set by default */
3839                 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3840                 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3841                 break;
3842         default:
3843                 /* We should do nothing since we don't know this hardware */
3844                 return;
3845         }
3846
3847         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3848 }
3849
3850 /**
3851  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3852  * @adapter: board private structure
3853  *
3854  * Configure the Rx unit of the MAC after a reset.
3855  **/
3856 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3857 {
3858         struct ixgbe_hw *hw = &adapter->hw;
3859         int i;
3860         u32 rxctrl, rfctl;
3861
3862         /* disable receives while setting up the descriptors */
3863         hw->mac.ops.disable_rx(hw);
3864
3865         ixgbe_setup_psrtype(adapter);
3866         ixgbe_setup_rdrxctl(adapter);
3867
3868         /* RSC Setup */
3869         rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3870         rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3871         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3872                 rfctl |= IXGBE_RFCTL_RSC_DIS;
3873         IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3874
3875         /* Program registers for the distribution of queues */
3876         ixgbe_setup_mrqc(adapter);
3877
3878         /* set_rx_buffer_len must be called before ring initialization */
3879         ixgbe_set_rx_buffer_len(adapter);
3880
3881         /*
3882          * Setup the HW Rx Head and Tail Descriptor Pointers and
3883          * the Base and Length of the Rx Descriptor Ring
3884          */
3885         for (i = 0; i < adapter->num_rx_queues; i++)
3886                 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3887
3888         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3889         /* disable drop enable for 82598 parts */
3890         if (hw->mac.type == ixgbe_mac_82598EB)
3891                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3892
3893         /* enable all receives */
3894         rxctrl |= IXGBE_RXCTRL_RXEN;
3895         hw->mac.ops.enable_rx_dma(hw, rxctrl);
3896 }
3897
3898 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3899                                  __be16 proto, u16 vid)
3900 {
3901         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3902         struct ixgbe_hw *hw = &adapter->hw;
3903
3904         /* add VID to filter table */
3905         hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
3906         set_bit(vid, adapter->active_vlans);
3907
3908         return 0;
3909 }
3910
3911 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3912                                   __be16 proto, u16 vid)
3913 {
3914         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3915         struct ixgbe_hw *hw = &adapter->hw;
3916
3917         /* remove VID from filter table */
3918         hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false, true);
3919         clear_bit(vid, adapter->active_vlans);
3920
3921         return 0;
3922 }
3923
3924 /**
3925  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3926  * @adapter: driver data
3927  */
3928 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3929 {
3930         struct ixgbe_hw *hw = &adapter->hw;
3931         u32 vlnctrl;
3932         int i, j;
3933
3934         switch (hw->mac.type) {
3935         case ixgbe_mac_82598EB:
3936                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3937                 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3938                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3939                 break;
3940         case ixgbe_mac_82599EB:
3941         case ixgbe_mac_X540:
3942         case ixgbe_mac_X550:
3943         case ixgbe_mac_X550EM_x:
3944                 for (i = 0; i < adapter->num_rx_queues; i++) {
3945                         struct ixgbe_ring *ring = adapter->rx_ring[i];
3946
3947                         if (ring->l2_accel_priv)
3948                                 continue;
3949                         j = ring->reg_idx;
3950                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3951                         vlnctrl &= ~IXGBE_RXDCTL_VME;
3952                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3953                 }
3954                 break;
3955         default:
3956                 break;
3957         }
3958 }
3959
3960 /**
3961  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3962  * @adapter: driver data
3963  */
3964 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3965 {
3966         struct ixgbe_hw *hw = &adapter->hw;
3967         u32 vlnctrl;
3968         int i, j;
3969
3970         switch (hw->mac.type) {
3971         case ixgbe_mac_82598EB:
3972                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3973                 vlnctrl |= IXGBE_VLNCTRL_VME;
3974                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3975                 break;
3976         case ixgbe_mac_82599EB:
3977         case ixgbe_mac_X540:
3978         case ixgbe_mac_X550:
3979         case ixgbe_mac_X550EM_x:
3980                 for (i = 0; i < adapter->num_rx_queues; i++) {
3981                         struct ixgbe_ring *ring = adapter->rx_ring[i];
3982
3983                         if (ring->l2_accel_priv)
3984                                 continue;
3985                         j = ring->reg_idx;
3986                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3987                         vlnctrl |= IXGBE_RXDCTL_VME;
3988                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3989                 }
3990                 break;
3991         default:
3992                 break;
3993         }
3994 }
3995
3996 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
3997 {
3998         struct ixgbe_hw *hw = &adapter->hw;
3999         u32 vlnctrl, i;
4000
4001         switch (hw->mac.type) {
4002         case ixgbe_mac_82599EB:
4003         case ixgbe_mac_X540:
4004         case ixgbe_mac_X550:
4005         case ixgbe_mac_X550EM_x:
4006         default:
4007                 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
4008                         break;
4009                 /* fall through */
4010         case ixgbe_mac_82598EB:
4011                 /* legacy case, we can just disable VLAN filtering */
4012                 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4013                 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4014                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4015                 return;
4016         }
4017
4018         /* We are already in VLAN promisc, nothing to do */
4019         if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4020                 return;
4021
4022         /* Set flag so we don't redo unnecessary work */
4023         adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4024
4025         /* Add PF to all active pools */
4026         for (i = IXGBE_VLVF_ENTRIES; --i;) {
4027                 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4028                 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4029
4030                 vlvfb |= 1 << (VMDQ_P(0) % 32);
4031                 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4032         }
4033
4034         /* Set all bits in the VLAN filter table array */
4035         for (i = hw->mac.vft_size; i--;)
4036                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4037 }
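
/*
 * Editor's sketch (illustrative only): each VLVF entry owns two
 * consecutive 32-bit VLVFB pool-enable words, so pool p of entry i
 * lives at word i * 2 + p / 32, bit p % 32 -- e.g. entry 5, pool 40
 * maps to VLVFB(11), bit 8.  This is the math the loop above applies
 * for VMDQ_P(0).
 */
#if 0	/* example only */
static void example_vlvfb_slot(u32 entry, u32 pool, u32 *word, u32 *bit)
{
	*word = entry * 2 + pool / 32;
	*bit = pool % 32;
}
#endif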
4038
4039 #define VFTA_BLOCK_SIZE 8
4040 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4041 {
4042         struct ixgbe_hw *hw = &adapter->hw;
4043         u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4044         u32 vid_start = vfta_offset * 32;
4045         u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4046         u32 i, vid, word, bits;
4047
4048         for (i = IXGBE_VLVF_ENTRIES; --i;) {
4049                 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4050
4051                 /* pull VLAN ID from VLVF */
4052                 vid = vlvf & VLAN_VID_MASK;
4053
4054                 /* only concern ourselves with VLANs in a certain range */
4055                 if (vid < vid_start || vid >= vid_end)
4056                         continue;
4057
4058                 if (vlvf) {
4059                         /* record VLAN ID in VFTA */
4060                         vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
4061
4062                         /* if PF is part of this then continue */
4063                         if (test_bit(vid, adapter->active_vlans))
4064                                 continue;
4065                 }
4066
4067                 /* remove PF from the pool */
4068                 word = i * 2 + VMDQ_P(0) / 32;
4069                 bits = ~(1 << (VMDQ_P(0) % 32));
4070                 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4071                 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4072         }
4073
4074         /* extract values from active_vlans and write back to VFTA */
4075         for (i = VFTA_BLOCK_SIZE; i--;) {
4076                 vid = (vfta_offset + i) * 32;
4077                 word = vid / BITS_PER_LONG;
4078                 bits = vid % BITS_PER_LONG;
4079
4080                 vfta[i] |= adapter->active_vlans[word] >> bits;
4081
4082                 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4083         }
4084 }
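
/*
 * Editor's sketch (illustrative only): the 4096 possible VLAN IDs map
 * onto 128 32-bit VFTA registers as register vid / 32, bit vid % 32,
 * so e.g. VID 100 is VFTA(3), bit 4.  The scrub above rebuilds one
 * VFTA_BLOCK_SIZE window of that table from active_vlans.
 */
#if 0	/* example only */
static void example_vfta_slot(u16 vid, u32 *reg, u32 *bit)
{
	*reg = vid / 32;
	*bit = vid % 32;
}
#endif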
4085
4086 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4087 {
4088         struct ixgbe_hw *hw = &adapter->hw;
4089         u32 vlnctrl, i;
4090
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        default:
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                        break;
                /* fall through */
        case ixgbe_mac_82598EB:
                vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
                vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
                vlnctrl |= IXGBE_VLNCTRL_VFE;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                return;
        }

        /* We are not in VLAN promisc, nothing to do */
        if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
                return;

        /* Set flag so we don't redo unnecessary work */
        adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;

        for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
                ixgbe_scrub_vfta(adapter, i);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
        u16 vid;

        ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        if (!netif_running(netdev))
                return 0;

        if (hw->mac.ops.update_mc_addr_list)
                hw->mac.ops.update_mc_addr_list(hw, netdev);
        else
                return -ENOMEM;

#ifdef CONFIG_PCI_IOV
        ixgbe_restore_vf_multicasts(adapter);
#endif

        return netdev_mc_count(netdev);
}

#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;

                if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
                        hw->mac.ops.set_rar(hw, i,
                                            mac_table->addr,
                                            mac_table->pool,
                                            IXGBE_RAH_AV);
                else
                        hw->mac.ops.clear_rar(hw, i);
        }
}

#endif
static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
                        continue;

                mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;

                if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
                        hw->mac.ops.set_rar(hw, i,
                                            mac_table->addr,
                                            mac_table->pool,
                                            IXGBE_RAH_AV);
                else
                        hw->mac.ops.clear_rar(hw, i);
        }
}

static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
                mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
        }

        ixgbe_sync_mac_table(adapter);
}

static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i, count = 0;

        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                /* do not count default RAR as available */
                if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
                        continue;

                /* only count unused and addresses that belong to us */
                if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
                        if (mac_table->pool != pool)
                                continue;
                }

                count++;
        }

        return count;
}

/* this function overwrites the first RAR entry, dedicating it to the
 * default MAC address
 */
static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;

        memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
        mac_table->pool = VMDQ_P(0);

        mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;

        hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
                            IXGBE_RAH_AV);
}

int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 pool)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        if (is_zero_ether_addr(addr))
                return -EINVAL;

        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
                        continue;

                ether_addr_copy(mac_table->addr, addr);
                mac_table->pool = pool;

                mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
                                    IXGBE_MAC_STATE_IN_USE;

                ixgbe_sync_mac_table(adapter);

                return i;
        }

        return -ENOMEM;
}

int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 pool)
{
        struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        if (is_zero_ether_addr(addr))
                return -EINVAL;

        /* search table for addr, if found clear IN_USE flag and sync */
        for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
                /* we can only delete an entry if it is in use */
                if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
                        continue;
                /* we only care about entries that belong to the given pool */
                if (mac_table->pool != pool)
                        continue;
                /* we only care about a specific MAC address */
                if (!ether_addr_equal(addr, mac_table->addr))
                        continue;

                mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
                mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;

                ixgbe_sync_mac_table(adapter);

                return 0;
        }

        return -ENOMEM;
}

/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int count = 0;

        /* return -ENOMEM if there is insufficient RAR space to store
         * all of the addresses
         */
        if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
                return -ENOMEM;

        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_uc_addr(ha, netdev) {
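                        /* drop any existing RAR entry for this
                         * address and pool before re-adding it, so
                         * repeated syncs do not accumulate duplicates
                         */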
                        ixgbe_del_mac_filter(adapter, ha->addr, vfn);
                        ixgbe_add_mac_filter(adapter, ha->addr, vfn);
                        count++;
                }
        }
        return count;
}

static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int ret;

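        /* ixgbe_add_mac_filter() returns the RAR index used (>= 0) on
         * success; flatten that to zero since __dev_uc_sync() treats
         * any nonzero return as an error
         */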
        ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));

        return min_t(int, ret, 0);
}

static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));

        return 0;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
        int count;

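        /* VMOLR bits used below, roughly per the 82599 datasheet:
         * BAM accepts broadcast, AUPE accepts untagged packets, MPE is
         * multicast promiscuous, and ROMPE/ROPE accept packets that hit
         * the multicast (MTA) and unicast (UTA) hash tables.
         */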
        /* Check for Promiscuous and All Multicast modes */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
        fctrl |= IXGBE_FCTRL_BAM;
        fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
        fctrl |= IXGBE_FCTRL_PMCF;

        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= IXGBE_VMOLR_MPE;
                ixgbe_vlan_promisc_enable(adapter);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
                hw->addr_ctrl.user_set_promisc = false;
                ixgbe_vlan_promisc_disable(adapter);
        }

        /*
         * Write addresses to available RAR registers, if there is not
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
        if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
                fctrl |= IXGBE_FCTRL_UPE;
                vmolr |= IXGBE_VMOLR_ROPE;
        }

        /* Write addresses to the MTA, if the attempt fails
         * then we should just turn on promiscuous mode so
         * that we can at least receive multicast traffic
         */
        count = ixgbe_write_mc_addr_list(netdev);
        if (count < 0) {
                fctrl |= IXGBE_FCTRL_MPE;
                vmolr |= IXGBE_VMOLR_MPE;
        } else if (count) {
                vmolr |= IXGBE_VMOLR_ROMPE;
        }

        if (hw->mac.type != ixgbe_mac_82598EB) {
                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
                         ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
                           IXGBE_VMOLR_ROPE);
                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
        }

        /* This is useful for sniffing bad packets. */
        if (adapter->netdev->features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
                 * in ixgbe_set_rx_mode
                 */
                fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
                          IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
                          IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */

                fctrl &= ~(IXGBE_FCTRL_DPF);
                /* NOTE:  VLAN filtering is disabled by setting PROMISC */
        }

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                ixgbe_vlan_strip_enable(adapter);
        else
                ixgbe_vlan_strip_disable(adapter);
}

static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
        int q_idx;

        for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
                napi_enable(&adapter->q_vector[q_idx]->napi);
        }
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
        int q_idx;

        for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                napi_disable(&adapter->q_vector[q_idx]->napi);
                while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
                        pr_info("QV %d locked\n", q_idx);
                        usleep_range(1000, 20000);
                }
        }
}

static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
{
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
#ifdef CONFIG_IXGBE_VXLAN
                adapter->vxlan_port = 0;
#endif
                break;
        default:
                break;
        }
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
                if (hw->mac.type == ixgbe_mac_82598EB)
                        netif_set_gso_max_size(adapter->netdev, 65536);
                return;
        }

        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);

#ifdef IXGBE_FCOE
        if (adapter->netdev->features & NETIF_F_FCOE_MTU)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

        /* reconfigure the hardware */
        if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
                ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                                DCB_TX_CONFIG);
                ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                                DCB_RX_CONFIG);
                ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
        } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
                ixgbe_dcb_hw_ets(&adapter->hw,
                                 adapter->ixgbe_ieee_ets,
                                 max_frame);
                ixgbe_dcb_hw_pfc_config(&adapter->hw,
                                        adapter->ixgbe_ieee_pfc->pfc_en,
                                        adapter->ixgbe_ieee_ets->prio_tc);
        }

        /* Enable RSS Hash per TC */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 msb = 0;
                u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;

                while (rss_i) {
                        msb++;
                        rss_i >>= 1;
                }
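                /* msb is now fls() of the RSS index count minus one,
                 * e.g. 16 RSS indices -> rss_i = 15 -> msb = 4, i.e.
                 * log2 of the queue count rounded up
                 */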

                /* write msb to all 8 TCs in one write */
                IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
        }
}
#endif

/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *dev = adapter->netdev;
        int link, tc, kb, marker;
        u32 dv_id, rx_pba;

        /* Calculate max LAN frame size */
        tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;

#ifdef IXGBE_FCOE
        /* FCoE traffic class uses FCOE jumbo frames */
        if ((dev->features & NETIF_F_FCOE_MTU) &&
            (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
            (pb == ixgbe_fcoe_get_tc(adapter)))
                tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                dv_id = IXGBE_DV_X540(link, tc);
                break;
        default:
                dv_id = IXGBE_DV(link, tc);
                break;
        }

        /* Loopback switch introduces additional latency */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                dv_id += IXGBE_B2BT(tc);

        /* Delay value is calculated in bit times; convert to KB */
        kb = IXGBE_BT2KB(dv_id);
        rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;

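        /* RXPBSIZE is in bytes, so rx_pba is the packet buffer size in
         * KB; the high water mark leaves kb KB of headroom to absorb
         * traffic still in flight once a pause frame has been sent
         */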
        marker = rx_pba - kb;

        /* It is possible that the packet buffer is not large enough
         * to provide the required headroom. In that case warn the
         * user and do the best we can.
         */
        if (marker < 0) {
                e_warn(drv, "Packet Buffer(%i) can not provide enough "
                            "headroom to support flow control. "
                            "Decrease MTU or number of traffic classes\n", pb);
                marker = tc + 1;
        }

        return marker;
}

/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *dev = adapter->netdev;
        int tc;
        u32 dv_id;

        /* Calculate max LAN frame size */
        tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
        /* FCoE traffic class uses FCOE jumbo frames */
        if ((dev->features & NETIF_F_FCOE_MTU) &&
            (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
            (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
                tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                dv_id = IXGBE_LOW_DV_X540(tc);
                break;
        default:
                dv_id = IXGBE_LOW_DV(tc);
                break;
        }

        /* Delay value is calculated in bit times; convert to KB */
        return IXGBE_BT2KB(dv_id);
}

/*
 * ixgbe_pbthresh_setup - calculate and set up high and low water marks
 */
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int num_tc = netdev_get_num_tc(adapter->netdev);
        int i;

        if (!num_tc)
                num_tc = 1;

        for (i = 0; i < num_tc; i++) {
                hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
                hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

                /* Low water marks must not be larger than high water marks */
                if (hw->fc.low_water[i] > hw->fc.high_water[i])
                        hw->fc.low_water[i] = 0;
        }

        for (; i < MAX_TRAFFIC_CLASS; i++)
                hw->fc.high_water[i] = 0;
}

static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int hdrm;
        u8 tc = netdev_get_num_tc(adapter->netdev);

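        /* fdir_pballoc is one of the IXGBE_FDIR_PBALLOC_* levels, so
         * 32 << fdir_pballoc reserves 64/128/256 KB of packet-buffer
         * headroom for the Flow Director filter tables
         */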
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
                hdrm = 32 << adapter->fdir_pballoc;
        else
                hdrm = 0;

        hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
        ixgbe_pbthresh_setup(adapter);
}

static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *filter;

        spin_lock(&adapter->fdir_perfect_lock);

        if (!hlist_empty(&adapter->fdir_filter_list))
                ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

        hlist_for_each_entry_safe(filter, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                ixgbe_fdir_write_perfect_filter_82599(hw,
                                &filter->filter,
                                filter->sw_idx,
                                (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
                                IXGBE_FDIR_DROP_QUEUE :
                                adapter->rx_ring[filter->action]->reg_idx);
        }

        spin_unlock(&adapter->fdir_perfect_lock);
}

static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
                                      struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vmolr;

        /* No unicast promiscuous support for VMDQ devices. */
        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
        vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);

        /* clear the affected bit */
        vmolr &= ~IXGBE_VMOLR_MPE;

        if (dev->flags & IFF_ALLMULTI) {
                vmolr |= IXGBE_VMOLR_MPE;
        } else {
                vmolr |= IXGBE_VMOLR_ROMPE;
                hw->mac.ops.update_mc_addr_list(hw, dev);
        }
        ixgbe_write_uc_addr_list(adapter->netdev, pool);
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}

static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
        struct ixgbe_adapter *adapter = vadapter->real_adapter;
        int rss_i = adapter->num_rx_queues_per_pool;
        struct ixgbe_hw *hw = &adapter->hw;
        u16 pool = vadapter->pool;
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                      IXGBE_PSRTYPE_UDPHDR |
                      IXGBE_PSRTYPE_IPV4HDR |
                      IXGBE_PSRTYPE_L2HDR |
                      IXGBE_PSRTYPE_IPV6HDR;

        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        if (rss_i > 3)
                psrtype |= 2 << 29;
        else if (rss_i > 1)
                psrtype |= 1 << 29;

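        /* the value at bit 29 advertises the number of RSS queues per
         * pool as a power of two: 2 -> up to 4 queues, 1 -> 2 queues,
         * 0 -> a single queue (per the 82599/X550 PSRTYPE layout)
         */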
        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
        struct device *dev = rx_ring->dev;
        unsigned long size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

                if (rx_buffer->skb) {
                        struct sk_buff *skb = rx_buffer->skb;

                        if (IXGBE_CB(skb)->page_released)
                                dma_unmap_page(dev,
                                               IXGBE_CB(skb)->dma,
                                               ixgbe_rx_bufsz(rx_ring),
                                               DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        rx_buffer->skb = NULL;
                }

                if (!rx_buffer->page)
                        continue;

                dma_unmap_page(dev, rx_buffer->dma,
                               ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
                __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));

                rx_buffer->page = NULL;
        }

        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
}

static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
                                   struct ixgbe_ring *rx_ring)
{
        struct ixgbe_adapter *adapter = vadapter->real_adapter;
        int index = rx_ring->queue_index + vadapter->rx_base_queue;

        /* shutdown specific queue receive and wait for dma to settle */
        ixgbe_disable_rx_queue(adapter, rx_ring);
        usleep_range(10000, 20000);
        ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
        ixgbe_clean_rx_ring(rx_ring);
        rx_ring->l2_accel_priv = NULL;
}

static int ixgbe_fwd_ring_down(struct net_device *vdev,
                               struct ixgbe_fwd_adapter *accel)
{
        struct ixgbe_adapter *adapter = accel->real_adapter;
        unsigned int rxbase = accel->rx_base_queue;
        unsigned int txbase = accel->tx_base_queue;
        int i;

        netif_tx_stop_all_queues(vdev);

        for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
                adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
        }

        for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
                adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
        }

        return 0;
}

static int ixgbe_fwd_ring_up(struct net_device *vdev,
                             struct ixgbe_fwd_adapter *accel)
{
        struct ixgbe_adapter *adapter = accel->real_adapter;
        unsigned int rxbase, txbase, queues;
        int i, baseq, err = 0;

        if (!test_bit(accel->pool, &adapter->fwd_bitmask))
                return 0;

        baseq = accel->pool * adapter->num_rx_queues_per_pool;
        netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
                   accel->pool, adapter->num_rx_pools,
                   baseq, baseq + adapter->num_rx_queues_per_pool,
                   adapter->fwd_bitmask);

        accel->netdev = vdev;
        accel->rx_base_queue = rxbase = baseq;
        accel->tx_base_queue = txbase = baseq;

        for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
                ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);

        for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                adapter->rx_ring[rxbase + i]->netdev = vdev;
                adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
                ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
        }

        for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
                adapter->tx_ring[txbase + i]->netdev = vdev;
                adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
        }

        queues = min_t(unsigned int,
                       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
        err = netif_set_real_num_tx_queues(vdev, queues);
        if (err)
                goto fwd_queue_err;

        err = netif_set_real_num_rx_queues(vdev, queues);
        if (err)
                goto fwd_queue_err;

        if (is_valid_ether_addr(vdev->dev_addr))
                ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);

        ixgbe_fwd_psrtype(accel);
        ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
        return err;
fwd_queue_err:
        ixgbe_fwd_ring_down(vdev, accel);
        return err;
}

static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
        struct net_device *upper;
        struct list_head *iter;
        int err;

        netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
                if (netif_is_macvlan(upper)) {
                        struct macvlan_dev *dfwd = netdev_priv(upper);
                        struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;

                        if (dfwd->fwd_priv) {
                                err = ixgbe_fwd_ring_up(upper, vadapter);
                                if (err)
                                        continue;
                        }
                }
        }
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
        ixgbe_configure_dcb(adapter);
#endif
        /*
         * We must restore virtualization before VLANs or else
         * the VLVF registers will not be populated
         */
        ixgbe_configure_virtualization(adapter);

        ixgbe_set_rx_mode(adapter->netdev);
        ixgbe_restore_vlan(adapter);

        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                hw->mac.ops.disable_rx_buff(hw);
                break;
        default:
                break;
        }

        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                ixgbe_init_fdir_signature_82599(&adapter->hw,
                                                adapter->fdir_pballoc);
        } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
                ixgbe_init_fdir_perfect_82599(&adapter->hw,
                                              adapter->fdir_pballoc);
                ixgbe_fdir_filter_restore(adapter);
        }

        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                hw->mac.ops.enable_rx_buff(hw);
                break;
        default:
                break;
        }

#ifdef CONFIG_IXGBE_DCA
        /* configure DCA */
        if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
                ixgbe_setup_dca(adapter);
#endif /* CONFIG_IXGBE_DCA */

#ifdef IXGBE_FCOE
        /* configure FCoE L2 filters, redirection table, and Rx control */
        ixgbe_configure_fcoe(adapter);

#endif /* IXGBE_FCOE */
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        ixgbe_configure_dfwd(adapter);
}

/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
        /*
         * We are assuming the worst case scenario here, and that
         * is that an SFP was inserted/removed after the reset
         * but before SFP detection was enabled.  As such the best
         * solution is to just start searching as soon as we start up.
         */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

        adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
        adapter->sfp_poll_time = 0;
}

/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
        u32 speed;
        bool autoneg, link_up = false;
        int ret = IXGBE_ERR_LINK_SETUP;

        if (hw->mac.ops.check_link)
                ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);

        if (ret)
                return ret;

        speed = hw->phy.autoneg_advertised;
        if ((!speed) && (hw->mac.ops.get_link_capabilities))
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
        if (ret)
                return ret;

        if (hw->mac.ops.setup_link)
                ret = hw->mac.ops.setup_link(hw, speed, link_up);

        return ret;
}

static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 gpie = 0;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
                       IXGBE_GPIE_OCD;
                gpie |= IXGBE_GPIE_EIAME;
                /*
                 * use EIAM to auto-mask when MSI-X interrupt is asserted
                 * this saves a register write for every interrupt
                 */
                switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
                        break;
                case ixgbe_mac_82599EB:
                case ixgbe_mac_X540:
                case ixgbe_mac_X550:
                case ixgbe_mac_X550EM_x:
                default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
                        break;
                }
        } else {
                /* legacy interrupts, use EIAM to auto-mask when reading EICR,
                 * specifically only auto mask tx and rx interrupts
                 */
                IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
        }

        /* XXX: to interrupt immediately for EICS writes, enable this */
        /* gpie |= IXGBE_GPIE_EIMEN; */

        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                gpie &= ~IXGBE_GPIE_VTMODE_MASK;

                switch (adapter->ring_feature[RING_F_VMDQ].mask) {
                case IXGBE_82599_VMDQ_8Q_MASK:
                        gpie |= IXGBE_GPIE_VTMODE_16;
                        break;
                case IXGBE_82599_VMDQ_4Q_MASK:
                        gpie |= IXGBE_GPIE_VTMODE_32;
                        break;
                default:
                        gpie |= IXGBE_GPIE_VTMODE_64;
                        break;
                }
        }

        /* Enable Thermal over heat sensor interrupt */
        if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
                        gpie |= IXGBE_SDP0_GPIEN_8259X;
                        break;
                default:
                        break;
                }
        }

        /* Enable fan failure interrupt */
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                gpie |= IXGBE_SDP1_GPIEN(hw);

        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
                gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
                break;
        case ixgbe_mac_X550EM_x:
                gpie |= IXGBE_SDP0_GPIEN_X540;
                break;
        default:
                break;
        }

        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}

static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int err;
        u32 ctrl_ext;

        ixgbe_get_hw_control(adapter);
        ixgbe_setup_gpie(adapter);

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                ixgbe_configure_msix(adapter);
        else
                ixgbe_configure_msi_and_legacy(adapter);

        /* enable the optics for 82599 SFP+ fiber */
        if (hw->mac.ops.enable_tx_laser)
                hw->mac.ops.enable_tx_laser(hw);

        if (hw->phy.ops.set_phy_power)
                hw->phy.ops.set_phy_power(hw, true);

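        /* full barrier ahead of the clear_bit() so everything written
         * above is visible before other CPUs can observe __IXGBE_DOWN
         * cleared and NAPI enabled
         */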
5137         smp_mb__before_atomic();
5138         clear_bit(__IXGBE_DOWN, &adapter->state);
5139         ixgbe_napi_enable_all(adapter);
5140
5141         if (ixgbe_is_sfp(hw)) {
5142                 ixgbe_sfp_link_config(adapter);
5143         } else {
5144                 err = ixgbe_non_sfp_link_config(hw);
5145                 if (err)
5146                         e_err(probe, "link_config FAILED %d\n", err);
5147         }
5148
5149         /* clear any pending interrupts, may auto mask */
5150         IXGBE_READ_REG(hw, IXGBE_EICR);
5151         ixgbe_irq_enable(adapter, true, true);
5152
5153         /*
5154          * If this adapter has a fan, check to see if we had a failure
5155          * before we enabled the interrupt.
5156          */
5157         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5158                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5159                 if (esdp & IXGBE_ESDP_SDP1)
5160                         e_crit(drv, "Fan has stopped, replace the adapter\n");
5161         }
5162
5163         /* bring the link up in the watchdog, this could race with our first
5164          * link up interrupt but shouldn't be a problem */
5165         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5166         adapter->link_check_timeout = jiffies;
5167         mod_timer(&adapter->service_timer, jiffies);
5168
5169         /* Set PF Reset Done bit so PF/VF Mail Ops can work */
5170         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5171         ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5172         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5173 }
5174
5175 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5176 {
5177         WARN_ON(in_interrupt());
5178         /* put off any impending NetWatchDogTimeout */
5179         adapter->netdev->trans_start = jiffies;
5180
5181         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5182                 usleep_range(1000, 2000);
5183         ixgbe_down(adapter);
5184         /*
5185          * If SR-IOV enabled then wait a bit before bringing the adapter
5186          * back up to give the VFs time to respond to the reset.  The
5187          * two second wait is based upon the watchdog timer cycle in
5188          * the VF driver.
5189          */
5190         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5191                 msleep(2000);
5192         ixgbe_up(adapter);
5193         clear_bit(__IXGBE_RESETTING, &adapter->state);
5194 }
5195
5196 void ixgbe_up(struct ixgbe_adapter *adapter)
5197 {
5198         /* hardware has been reset, we need to reload some things */
5199         ixgbe_configure(adapter);
5200
5201         ixgbe_up_complete(adapter);
5202 }
5203
5204 void ixgbe_reset(struct ixgbe_adapter *adapter)
5205 {
5206         struct ixgbe_hw *hw = &adapter->hw;
5207         struct net_device *netdev = adapter->netdev;
5208         int err;
5209
5210         if (ixgbe_removed(hw->hw_addr))
5211                 return;
5212         /* lock SFP init bit to prevent race conditions with the watchdog */
5213         while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5214                 usleep_range(1000, 2000);
5215
5216         /* clear all SFP and link config related flags while holding SFP_INIT */
5217         adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5218                              IXGBE_FLAG2_SFP_NEEDS_RESET);
5219         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5220
5221         err = hw->mac.ops.init_hw(hw);
5222         switch (err) {
5223         case 0:
5224         case IXGBE_ERR_SFP_NOT_PRESENT:
5225         case IXGBE_ERR_SFP_NOT_SUPPORTED:
5226                 break;
5227         case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5228                 e_dev_err("master disable timed out\n");
5229                 break;
5230         case IXGBE_ERR_EEPROM_VERSION:
5231                 /* We are running on a pre-production device, log a warning */
5232                 e_dev_warn("This device is a pre-production adapter/LOM. "
5233                            "Please be aware there may be issues associated with "
5234                            "your hardware.  If you are experiencing problems "
5235                            "please contact your Intel or hardware "
5236                            "representative who provided you with this "
5237                            "hardware.\n");
5238                 break;
5239         default:
5240                 e_dev_err("Hardware Error: %d\n", err);
5241         }
5242
5243         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5244
5245         /* flush entries out of MAC table */
5246         ixgbe_flush_sw_mac_table(adapter);
5247         __dev_uc_unsync(netdev, NULL);
5248
5249         /* do not flush user set addresses */
5250         ixgbe_mac_set_default_filter(adapter);
5251
5252         /* update SAN MAC vmdq pool selection */
5253         if (hw->mac.san_mac_rar_index)
5254                 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5255
5256         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5257                 ixgbe_ptp_reset(adapter);
5258
5259         if (hw->phy.ops.set_phy_power) {
5260                 if (!netif_running(adapter->netdev) && !adapter->wol)
5261                         hw->phy.ops.set_phy_power(hw, false);
5262                 else
5263                         hw->phy.ops.set_phy_power(hw, true);
5264         }
5265 }
5266
5267 /**
5268  * ixgbe_clean_tx_ring - Free Tx Buffers
5269  * @tx_ring: ring to be cleaned
5270  **/
5271 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5272 {
5273         struct ixgbe_tx_buffer *tx_buffer_info;
5274         unsigned long size;
5275         u16 i;
5276
5277         /* ring already cleared, nothing to do */
5278         if (!tx_ring->tx_buffer_info)
5279                 return;
5280
5281         /* Free all the Tx ring sk_buffs */
5282         for (i = 0; i < tx_ring->count; i++) {
5283                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5284                 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5285         }
5286
5287         netdev_tx_reset_queue(txring_txq(tx_ring));
5288
5289         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5290         memset(tx_ring->tx_buffer_info, 0, size);
5291
5292         /* Zero out the descriptor ring */
5293         memset(tx_ring->desc, 0, tx_ring->size);
5294
5295         tx_ring->next_to_use = 0;
5296         tx_ring->next_to_clean = 0;
5297 }
5298
5299 /**
5300  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
5301  * @adapter: board private structure
5302  **/
5303 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5304 {
5305         int i;
5306
5307         for (i = 0; i < adapter->num_rx_queues; i++)
5308                 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5309 }
5310
5311 /**
5312  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
5313  * @adapter: board private structure
5314  **/
5315 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5316 {
5317         int i;
5318
5319         for (i = 0; i < adapter->num_tx_queues; i++)
5320                 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5321 }
5322
5323 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5324 {
5325         struct hlist_node *node2;
5326         struct ixgbe_fdir_filter *filter;
5327
5328         spin_lock(&adapter->fdir_perfect_lock);
5329
5330         hlist_for_each_entry_safe(filter, node2,
5331                                   &adapter->fdir_filter_list, fdir_node) {
5332                 hlist_del(&filter->fdir_node);
5333                 kfree(filter);
5334         }
5335         adapter->fdir_filter_count = 0;
5336
5337         spin_unlock(&adapter->fdir_perfect_lock);
5338 }
5339
5340 void ixgbe_down(struct ixgbe_adapter *adapter)
5341 {
5342         struct net_device *netdev = adapter->netdev;
5343         struct ixgbe_hw *hw = &adapter->hw;
5344         struct net_device *upper;
5345         struct list_head *iter;
5346         int i;
5347
5348         /* signal that we are down to the interrupt handler */
5349         if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5350                 return; /* do nothing if already down */
5351
5352         /* disable receives */
5353         hw->mac.ops.disable_rx(hw);
5354
5355         /* disable all enabled rx queues */
5356         for (i = 0; i < adapter->num_rx_queues; i++)
5357                 /* this call also flushes the previous write */
5358                 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5359
5360         usleep_range(10000, 20000);
5361
5362         netif_tx_stop_all_queues(netdev);
5363
5364         /* call carrier off first to avoid false dev_watchdog timeouts */
5365         netif_carrier_off(netdev);
5366         netif_tx_disable(netdev);
5367
5368         /* disable any upper devices */
5369         netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
5370                 if (netif_is_macvlan(upper)) {
5371                         struct macvlan_dev *vlan = netdev_priv(upper);
5372
5373                         if (vlan->fwd_priv) {
5374                                 netif_tx_stop_all_queues(upper);
5375                                 netif_carrier_off(upper);
5376                                 netif_tx_disable(upper);
5377                         }
5378                 }
5379         }
5380
5381         ixgbe_irq_disable(adapter);
5382
5383         ixgbe_napi_disable_all(adapter);
5384
5385         adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
5386                              IXGBE_FLAG2_RESET_REQUESTED);
5387         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5388
5389         del_timer_sync(&adapter->service_timer);
5390
5391         if (adapter->num_vfs) {
5392                 /* Clear EITR Select mapping */
5393                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5394
5395                 /* Mark all the VFs as inactive */
5396                 for (i = 0 ; i < adapter->num_vfs; i++)
5397                         adapter->vfinfo[i].clear_to_send = false;
5398
5399                 /* ping all the active vfs to let them know we are going down */
5400                 ixgbe_ping_all_vfs(adapter);
5401
5402                 /* Disable all VFTE/VFRE TX/RX */
5403                 ixgbe_disable_tx_rx(adapter);
5404         }
5405
5406         /* disable transmits in the hardware now that interrupts are off */
5407         for (i = 0; i < adapter->num_tx_queues; i++) {
5408                 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5409                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5410         }
5411
5412         /* Disable the Tx DMA engine on 82599 and later MAC */
5413         switch (hw->mac.type) {
5414         case ixgbe_mac_82599EB:
5415         case ixgbe_mac_X540:
5416         case ixgbe_mac_X550:
5417         case ixgbe_mac_X550EM_x:
5418                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5419                                 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5420                                  ~IXGBE_DMATXCTL_TE));
5421                 break;
5422         default:
5423                 break;
5424         }
5425
5426         if (!pci_channel_offline(adapter->pdev))
5427                 ixgbe_reset(adapter);
5428
5429         /* power down the optics for 82599 SFP+ fiber */
5430         if (hw->mac.ops.disable_tx_laser)
5431                 hw->mac.ops.disable_tx_laser(hw);
5432
5433         ixgbe_clean_all_tx_rings(adapter);
5434         ixgbe_clean_all_rx_rings(adapter);
5435 }
5436
5437 /**
5438  * ixgbe_tx_timeout - Respond to a Tx Hang
5439  * @netdev: network interface device structure
5440  **/
5441 static void ixgbe_tx_timeout(struct net_device *netdev)
5442 {
5443         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5444
5445         /* Do the reset outside of interrupt context */
5446         ixgbe_tx_timeout_reset(adapter);
5447 }
5448
5449 /**
5450  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
5451  * @adapter: board private structure to initialize
5452  *
5453  * ixgbe_sw_init initializes the Adapter private data structure.
5454  * Fields are initialized based on PCI device information and
5455  * OS network device settings (MTU size).
5456  **/
5457 static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5458 {
5459         struct ixgbe_hw *hw = &adapter->hw;
5460         struct pci_dev *pdev = adapter->pdev;
5461         unsigned int rss, fdir;
5462         u32 fwsm;
5463 #ifdef CONFIG_IXGBE_DCB
5464         int j;
5465         struct tc_configuration *tc;
5466 #endif
5467
5468         /* PCI config space info */
5469
5470         hw->vendor_id = pdev->vendor;
5471         hw->device_id = pdev->device;
5472         hw->revision_id = pdev->revision;
5473         hw->subsystem_vendor_id = pdev->subsystem_vendor;
5474         hw->subsystem_device_id = pdev->subsystem_device;
5475
5476         /* Set common capability flags and settings */
5477         rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5478         adapter->ring_feature[RING_F_RSS].limit = rss;
5479         adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5480         adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5481         adapter->atr_sample_rate = 20;
5482         fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5483         adapter->ring_feature[RING_F_FDIR].limit = fdir;
5484         adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5485 #ifdef CONFIG_IXGBE_DCA
5486         adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5487 #endif
5488 #ifdef IXGBE_FCOE
5489         adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5490         adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5491 #ifdef CONFIG_IXGBE_DCB
5492         /* Default traffic class to use for FCoE */
5493         adapter->fcoe.up = IXGBE_FCOE_DEFTC;
5494 #endif /* CONFIG_IXGBE_DCB */
5495 #endif /* IXGBE_FCOE */
5496
5497         adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5498                                      hw->mac.num_rar_entries,
5499                                      GFP_ATOMIC);
5500         if (!adapter->mac_table)
5501                 return -ENOMEM;
5502
5503         /* Set MAC specific capability flags and exceptions */
5504         switch (hw->mac.type) {
5505         case ixgbe_mac_82598EB:
5506                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5507
5508                 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5509                         adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5510
5511                 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5512                 adapter->ring_feature[RING_F_FDIR].limit = 0;
5513                 adapter->atr_sample_rate = 0;
5514                 adapter->fdir_pballoc = 0;
5515 #ifdef IXGBE_FCOE
5516                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5517                 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5518 #ifdef CONFIG_IXGBE_DCB
5519                 adapter->fcoe.up = 0;
5520 #endif /* IXGBE_DCB */
5521 #endif /* IXGBE_FCOE */
5522                 break;
5523         case ixgbe_mac_82599EB:
5524                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5525                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5526                 break;
5527         case ixgbe_mac_X540:
5528                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
5529                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5530                         adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5531                 break;
5532         case ixgbe_mac_X550EM_x:
5533         case ixgbe_mac_X550:
5534 #ifdef CONFIG_IXGBE_DCA
5535                 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5536 #endif
5537 #ifdef CONFIG_IXGBE_VXLAN
5538                 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5539 #endif
5540                 break;
5541         default:
5542                 break;
5543         }
5544
5545 #ifdef IXGBE_FCOE
5546         /* FCoE support exists, always init the FCoE lock */
5547         spin_lock_init(&adapter->fcoe.lock);
5548
5549 #endif
5550         /* n-tuple support exists, always init our spinlock */
5551         spin_lock_init(&adapter->fdir_perfect_lock);
5552
5553 #ifdef CONFIG_IXGBE_DCB
5554         switch (hw->mac.type) {
5555         case ixgbe_mac_X540:
5556         case ixgbe_mac_X550:
5557         case ixgbe_mac_X550EM_x:
5558                 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5559                 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5560                 break;
5561         default:
5562                 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5563                 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5564                 break;
5565         }
5566
5567         /* Configure DCB traffic classes */
5568         for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5569                 tc = &adapter->dcb_cfg.tc_config[j];
5570                 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5571                 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5572                 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5573                 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5574                 tc->dcb_pfc = pfc_disabled;
5575         }
5576
5577         /* Initialize default user priority to traffic class mapping, UPx->TC0 */
5578         tc = &adapter->dcb_cfg.tc_config[0];
5579         tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5580         tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5581
5582         adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5583         adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5584         adapter->dcb_cfg.pfc_mode_enable = false;
5585         adapter->dcb_set_bitmap = 0x00;
5586         adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5587         memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5588                sizeof(adapter->temp_dcb_cfg));
5589
5590 #endif
5591
5592         /* default flow control settings */
5593         hw->fc.requested_mode = ixgbe_fc_full;
5594         hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
5595         ixgbe_pbthresh_setup(adapter);
5596         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5597         hw->fc.send_xon = true;
5598         hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5599
5600 #ifdef CONFIG_PCI_IOV
5601         if (max_vfs > 0)
5602                 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5603
5604         /* assign number of SR-IOV VFs */
5605         if (hw->mac.type != ixgbe_mac_82598EB) {
5606                 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5607                         adapter->num_vfs = 0;
5608                         e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5609                 } else {
5610                         adapter->num_vfs = max_vfs;
5611                 }
5612         }
5613 #endif /* CONFIG_PCI_IOV */
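/*
 * For reference, a minimal sketch of the sysfs alternative recommended by the
 * deprecation warning above (the interface name "eth0" is a placeholder, not
 * something this driver mandates):
 *
 *     echo 4 > /sys/class/net/eth0/device/sriov_numvfs
 */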
5614
5615         /* enable itr by default in dynamic mode */
5616         adapter->rx_itr_setting = 1;
5617         adapter->tx_itr_setting = 1;
5618
5619         /* set default ring sizes */
5620         adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5621         adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5622
5623         /* set default work limits */
5624         adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5625
5626         /* initialize eeprom parameters */
5627         if (ixgbe_init_eeprom_params_generic(hw)) {
5628                 e_dev_err("EEPROM initialization failed\n");
5629                 return -EIO;
5630         }
5631
5632         /* PF holds first pool slot */
5633         set_bit(0, &adapter->fwd_bitmask);
5634         set_bit(__IXGBE_DOWN, &adapter->state);
5635
5636         return 0;
5637 }
5638
5639 /**
5640  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
5641  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
5642  *
5643  * Return 0 on success, negative on failure
5644  **/
5645 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5646 {
5647         struct device *dev = tx_ring->dev;
5648         int orig_node = dev_to_node(dev);
5649         int ring_node = -1;
5650         int size;
5651
5652         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5653
5654         if (tx_ring->q_vector)
5655                 ring_node = tx_ring->q_vector->numa_node;
5656
5657         tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
5658         if (!tx_ring->tx_buffer_info)
5659                 tx_ring->tx_buffer_info = vzalloc(size);
5660         if (!tx_ring->tx_buffer_info)
5661                 goto err;
5662
5663         u64_stats_init(&tx_ring->syncp);
5664
5665         /* round up to nearest 4K */
5666         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5667         tx_ring->size = ALIGN(tx_ring->size, 4096);
5668
5669         set_dev_node(dev, ring_node);
5670         tx_ring->desc = dma_alloc_coherent(dev,
5671                                            tx_ring->size,
5672                                            &tx_ring->dma,
5673                                            GFP_KERNEL);
5674         set_dev_node(dev, orig_node);
5675         if (!tx_ring->desc)
5676                 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5677                                                    &tx_ring->dma, GFP_KERNEL);
5678         if (!tx_ring->desc)
5679                 goto err;
5680
5681         tx_ring->next_to_use = 0;
5682         tx_ring->next_to_clean = 0;
5683         return 0;
5684
5685 err:
5686         vfree(tx_ring->tx_buffer_info);
5687         tx_ring->tx_buffer_info = NULL;
5688         dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5689         return -ENOMEM;
5690 }
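/*
 * The allocation strategy above can be summarized as "prefer the q_vector's
 * NUMA node, fall back to any node"; the same pattern, in isolation:
 *
 *     buffer = vzalloc_node(size, ring_node);
 *     if (!buffer)
 *             buffer = vzalloc(size);
 */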
5691
5692 /**
5693  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5694  * @adapter: board private structure
5695  *
5696  * If this function returns with an error, then it's possible one or
5697  * more of the rings is populated (while the rest are not).  It is the
5698  * caller's duty to clean those orphaned rings.
5699  *
5700  * Return 0 on success, negative on failure
5701  **/
5702 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5703 {
5704         int i, err = 0;
5705
5706         for (i = 0; i < adapter->num_tx_queues; i++) {
5707                 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5708                 if (!err)
5709                         continue;
5710
5711                 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5712                 goto err_setup_tx;
5713         }
5714
5715         return 0;
5716 err_setup_tx:
5717         /* rewind the index freeing the rings as we go */
5718         while (i--)
5719                 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5720         return err;
5721 }
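/*
 * Example of the rewind above: if allocation fails at i == 2, the
 * while (i--) loop frees tx_ring[1] and then tx_ring[0], so no earlier ring
 * is left orphaned (ring 2 cleaned up after itself on the error path).
 */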
5722
5723 /**
5724  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5725  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
5726  *
5727  * Returns 0 on success, negative on failure
5728  **/
5729 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5730 {
5731         struct device *dev = rx_ring->dev;
5732         int orig_node = dev_to_node(dev);
5733         int ring_node = -1;
5734         int size;
5735
5736         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5737
5738         if (rx_ring->q_vector)
5739                 ring_node = rx_ring->q_vector->numa_node;
5740
5741         rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
5742         if (!rx_ring->rx_buffer_info)
5743                 rx_ring->rx_buffer_info = vzalloc(size);
5744         if (!rx_ring->rx_buffer_info)
5745                 goto err;
5746
5747         u64_stats_init(&rx_ring->syncp);
5748
5749         /* Round up to nearest 4K */
5750         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5751         rx_ring->size = ALIGN(rx_ring->size, 4096);
5752
5753         set_dev_node(dev, ring_node);
5754         rx_ring->desc = dma_alloc_coherent(dev,
5755                                            rx_ring->size,
5756                                            &rx_ring->dma,
5757                                            GFP_KERNEL);
5758         set_dev_node(dev, orig_node);
5759         if (!rx_ring->desc)
5760                 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5761                                                    &rx_ring->dma, GFP_KERNEL);
5762         if (!rx_ring->desc)
5763                 goto err;
5764
5765         rx_ring->next_to_clean = 0;
5766         rx_ring->next_to_use = 0;
5767
5768         return 0;
5769 err:
5770         vfree(rx_ring->rx_buffer_info);
5771         rx_ring->rx_buffer_info = NULL;
5772         dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5773         return -ENOMEM;
5774 }
5775
5776 /**
5777  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5778  * @adapter: board private structure
5779  *
5780  * If this function returns with an error, then it's possible one or
5781  * more of the rings is populated (while the rest are not).  It is the
5782  * caller's duty to clean those orphaned rings.
5783  *
5784  * Return 0 on success, negative on failure
5785  **/
5786 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5787 {
5788         int i, err = 0;
5789
5790         for (i = 0; i < adapter->num_rx_queues; i++) {
5791                 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5792                 if (!err)
5793                         continue;
5794
5795                 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5796                 goto err_setup_rx;
5797         }
5798
5799 #ifdef IXGBE_FCOE
5800         err = ixgbe_setup_fcoe_ddp_resources(adapter);
5801         if (!err)
5802 #endif
5803                 return 0;
5804 err_setup_rx:
5805         /* rewind the index freeing the rings as we go */
5806         while (i--)
5807                 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5808         return err;
5809 }
5810
5811 /**
5812  * ixgbe_free_tx_resources - Free Tx Resources per Queue
5813  * @tx_ring: Tx descriptor ring for a specific queue
5814  *
5815  * Free all transmit software resources
5816  **/
5817 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5818 {
5819         ixgbe_clean_tx_ring(tx_ring);
5820
5821         vfree(tx_ring->tx_buffer_info);
5822         tx_ring->tx_buffer_info = NULL;
5823
5824         /* if not set, then don't free */
5825         if (!tx_ring->desc)
5826                 return;
5827
5828         dma_free_coherent(tx_ring->dev, tx_ring->size,
5829                           tx_ring->desc, tx_ring->dma);
5830
5831         tx_ring->desc = NULL;
5832 }
5833
5834 /**
5835  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5836  * @adapter: board private structure
5837  *
5838  * Free all transmit software resources
5839  **/
5840 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5841 {
5842         int i;
5843
5844         for (i = 0; i < adapter->num_tx_queues; i++)
5845                 if (adapter->tx_ring[i]->desc)
5846                         ixgbe_free_tx_resources(adapter->tx_ring[i]);
5847 }
5848
5849 /**
5850  * ixgbe_free_rx_resources - Free Rx Resources
5851  * @rx_ring: ring to clean the resources from
5852  *
5853  * Free all receive software resources
5854  **/
5855 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5856 {
5857         ixgbe_clean_rx_ring(rx_ring);
5858
5859         vfree(rx_ring->rx_buffer_info);
5860         rx_ring->rx_buffer_info = NULL;
5861
5862         /* if not set, then don't free */
5863         if (!rx_ring->desc)
5864                 return;
5865
5866         dma_free_coherent(rx_ring->dev, rx_ring->size,
5867                           rx_ring->desc, rx_ring->dma);
5868
5869         rx_ring->desc = NULL;
5870 }
5871
5872 /**
5873  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5874  * @adapter: board private structure
5875  *
5876  * Free all receive software resources
5877  **/
5878 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5879 {
5880         int i;
5881
5882 #ifdef IXGBE_FCOE
5883         ixgbe_free_fcoe_ddp_resources(adapter);
5884
5885 #endif
5886         for (i = 0; i < adapter->num_rx_queues; i++)
5887                 if (adapter->rx_ring[i]->desc)
5888                         ixgbe_free_rx_resources(adapter->rx_ring[i]);
5889 }
5890
5891 /**
5892  * ixgbe_change_mtu - Change the Maximum Transfer Unit
5893  * @netdev: network interface device structure
5894  * @new_mtu: new value for maximum frame size
5895  *
5896  * Returns 0 on success, negative on failure
5897  **/
5898 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5899 {
5900         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5901         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5902
5903         /* MTU < 68 is an error and causes problems on some kernels */
5904         if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5905                 return -EINVAL;
5906
5907         /*
5908          * For 82599EB we cannot allow legacy VFs to enable their receive
5909          * paths when MTU greater than 1500 is configured.  So display a
5910          * warning that legacy VFs will be disabled.
5911          */
5912         if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5913             (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5914             (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5915                 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5916
5917         e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5918
5919         /* must set new MTU before calling down or up */
5920         netdev->mtu = new_mtu;
5921
5922         if (netif_running(netdev))
5923                 ixgbe_reinit_locked(adapter);
5924
5925         return 0;
5926 }
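/*
 * Example: for the standard MTU of 1500, max_frame above is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, comfortably below
 * IXGBE_MAX_JUMBO_FRAME_SIZE.
 */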
5927
5928 /**
5929  * ixgbe_open - Called when a network interface is made active
5930  * @netdev: network interface device structure
5931  *
5932  * Returns 0 on success, negative value on failure
5933  *
5934  * The open entry point is called when a network interface is made
5935  * active by the system (IFF_UP).  At this point all resources needed
5936  * for transmit and receive operations are allocated, the interrupt
5937  * handler is registered with the OS, the watchdog timer is started,
5938  * and the stack is notified that the interface is ready.
5939  **/
5940 static int ixgbe_open(struct net_device *netdev)
5941 {
5942         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5943         struct ixgbe_hw *hw = &adapter->hw;
5944         int err, queues;
5945
5946         /* disallow open during test */
5947         if (test_bit(__IXGBE_TESTING, &adapter->state))
5948                 return -EBUSY;
5949
5950         netif_carrier_off(netdev);
5951
5952         /* allocate transmit descriptors */
5953         err = ixgbe_setup_all_tx_resources(adapter);
5954         if (err)
5955                 goto err_setup_tx;
5956
5957         /* allocate receive descriptors */
5958         err = ixgbe_setup_all_rx_resources(adapter);
5959         if (err)
5960                 goto err_setup_rx;
5961
5962         ixgbe_configure(adapter);
5963
5964         err = ixgbe_request_irq(adapter);
5965         if (err)
5966                 goto err_req_irq;
5967
5968         /* Notify the stack of the actual queue counts. */
5969         if (adapter->num_rx_pools > 1)
5970                 queues = adapter->num_rx_queues_per_pool;
5971         else
5972                 queues = adapter->num_tx_queues;
5973
5974         err = netif_set_real_num_tx_queues(netdev, queues);
5975         if (err)
5976                 goto err_set_queues;
5977
5978         if (adapter->num_rx_pools > 1 &&
5979             adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5980                 queues = IXGBE_MAX_L2A_QUEUES;
5981         else
5982                 queues = adapter->num_rx_queues;
5983         err = netif_set_real_num_rx_queues(netdev, queues);
5984         if (err)
5985                 goto err_set_queues;
5986
5987         ixgbe_ptp_init(adapter);
5988
5989         ixgbe_up_complete(adapter);
5990
5991         ixgbe_clear_vxlan_port(adapter);
5992 #ifdef CONFIG_IXGBE_VXLAN
5993         vxlan_get_rx_port(netdev);
5994 #endif
5995
5996         return 0;
5997
5998 err_set_queues:
5999         ixgbe_free_irq(adapter);
6000 err_req_irq:
6001         ixgbe_free_all_rx_resources(adapter);
6002         if (hw->phy.ops.set_phy_power && !adapter->wol)
6003                 hw->phy.ops.set_phy_power(hw, false);
6004 err_setup_rx:
6005         ixgbe_free_all_tx_resources(adapter);
6006 err_setup_tx:
6007         ixgbe_reset(adapter);
6008
6009         return err;
6010 }
6011
6012 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6013 {
6014         ixgbe_ptp_suspend(adapter);
6015
6016         if (adapter->hw.phy.ops.enter_lplu) {
6017                 adapter->hw.phy.reset_disable = true;
6018                 ixgbe_down(adapter);
6019                 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6020                 adapter->hw.phy.reset_disable = false;
6021         } else {
6022                 ixgbe_down(adapter);
6023         }
6024
6025         ixgbe_free_irq(adapter);
6026
6027         ixgbe_free_all_tx_resources(adapter);
6028         ixgbe_free_all_rx_resources(adapter);
6029 }
6030
6031 /**
6032  * ixgbe_close - Disables a network interface
6033  * @netdev: network interface device structure
6034  *
6035  * Returns 0, this is not allowed to fail
6036  *
6037  * The close entry point is called when an interface is de-activated
6038  * by the OS.  The hardware is still under the drivers control, but
6039  * needs to be disabled.  A global MAC reset is issued to stop the
6040  * hardware, and all transmit and receive resources are freed.
6041  **/
6042 static int ixgbe_close(struct net_device *netdev)
6043 {
6044         struct ixgbe_adapter *adapter = netdev_priv(netdev);
6045
6046         ixgbe_ptp_stop(adapter);
6047
6048         ixgbe_close_suspend(adapter);
6049
6050         ixgbe_fdir_filter_exit(adapter);
6051
6052         ixgbe_release_hw_control(adapter);
6053
6054         return 0;
6055 }
6056
6057 #ifdef CONFIG_PM
6058 static int ixgbe_resume(struct pci_dev *pdev)
6059 {
6060         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6061         struct net_device *netdev = adapter->netdev;
6062         int err;
6063
6064         adapter->hw.hw_addr = adapter->io_addr;
6065         pci_set_power_state(pdev, PCI_D0);
6066         pci_restore_state(pdev);
6067         /*
6068          * pci_restore_state clears dev->state_saved so call
6069          * pci_save_state to restore it.
6070          */
6071         pci_save_state(pdev);
6072
6073         err = pci_enable_device_mem(pdev);
6074         if (err) {
6075                 e_dev_err("Cannot enable PCI device from suspend\n");
6076                 return err;
6077         }
6078         smp_mb__before_atomic();
6079         clear_bit(__IXGBE_DISABLED, &adapter->state);
6080         pci_set_master(pdev);
6081
6082         pci_wake_from_d3(pdev, false);
6083
6084         ixgbe_reset(adapter);
6085
6086         IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6087
6088         rtnl_lock();
6089         err = ixgbe_init_interrupt_scheme(adapter);
6090         if (!err && netif_running(netdev))
6091                 err = ixgbe_open(netdev);
6092
6093         rtnl_unlock();
6094
6095         if (err)
6096                 return err;
6097
6098         netif_device_attach(netdev);
6099
6100         return 0;
6101 }
6102 #endif /* CONFIG_PM */
6103
6104 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6105 {
6106         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6107         struct net_device *netdev = adapter->netdev;
6108         struct ixgbe_hw *hw = &adapter->hw;
6109         u32 ctrl, fctrl;
6110         u32 wufc = adapter->wol;
6111 #ifdef CONFIG_PM
6112         int retval = 0;
6113 #endif
6114
6115         netif_device_detach(netdev);
6116
6117         rtnl_lock();
6118         if (netif_running(netdev))
6119                 ixgbe_close_suspend(adapter);
6120         rtnl_unlock();
6121
6122         ixgbe_clear_interrupt_scheme(adapter);
6123
6124 #ifdef CONFIG_PM
6125         retval = pci_save_state(pdev);
6126         if (retval)
6127                 return retval;
6128
6129 #endif
6130         if (hw->mac.ops.stop_link_on_d3)
6131                 hw->mac.ops.stop_link_on_d3(hw);
6132
6133         if (wufc) {
6134                 ixgbe_set_rx_mode(netdev);
6135
6136                 /* enable the optics for 82599 SFP+ fiber so WoL can function */
6137                 if (hw->mac.ops.enable_tx_laser)
6138                         hw->mac.ops.enable_tx_laser(hw);
6139
6140                 /* turn on all-multi mode if wake on multicast is enabled */
6141                 if (wufc & IXGBE_WUFC_MC) {
6142                         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6143                         fctrl |= IXGBE_FCTRL_MPE;
6144                         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6145                 }
6146
6147                 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6148                 ctrl |= IXGBE_CTRL_GIO_DIS;
6149                 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6150
6151                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6152         } else {
6153                 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6154                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6155         }
6156
6157         switch (hw->mac.type) {
6158         case ixgbe_mac_82598EB:
6159                 pci_wake_from_d3(pdev, false);
6160                 break;
6161         case ixgbe_mac_82599EB:
6162         case ixgbe_mac_X540:
6163         case ixgbe_mac_X550:
6164         case ixgbe_mac_X550EM_x:
6165                 pci_wake_from_d3(pdev, !!wufc);
6166                 break;
6167         default:
6168                 break;
6169         }
6170
6171         *enable_wake = !!wufc;
6172         if (hw->phy.ops.set_phy_power && !*enable_wake)
6173                 hw->phy.ops.set_phy_power(hw, false);
6174
6175         ixgbe_release_hw_control(adapter);
6176
6177         if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6178                 pci_disable_device(pdev);
6179
6180         return 0;
6181 }
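/*
 * Example: if only wake-on-multicast is armed (wufc == IXGBE_WUFC_MC), the
 * path above sets FCTRL.MPE so that multicast wake frames reach the wake-up
 * filter logic instead of being dropped by the normal Rx filters.
 */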
6182
6183 #ifdef CONFIG_PM
6184 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6185 {
6186         int retval;
6187         bool wake;
6188
6189         retval = __ixgbe_shutdown(pdev, &wake);
6190         if (retval)
6191                 return retval;
6192
6193         if (wake) {
6194                 pci_prepare_to_sleep(pdev);
6195         } else {
6196                 pci_wake_from_d3(pdev, false);
6197                 pci_set_power_state(pdev, PCI_D3hot);
6198         }
6199
6200         return 0;
6201 }
6202 #endif /* CONFIG_PM */
6203
6204 static void ixgbe_shutdown(struct pci_dev *pdev)
6205 {
6206         bool wake;
6207
6208         __ixgbe_shutdown(pdev, &wake);
6209
6210         if (system_state == SYSTEM_POWER_OFF) {
6211                 pci_wake_from_d3(pdev, wake);
6212                 pci_set_power_state(pdev, PCI_D3hot);
6213         }
6214 }
6215
6216 /**
6217  * ixgbe_update_stats - Update the board statistics counters.
6218  * @adapter: board private structure
6219  **/
6220 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6221 {
6222         struct net_device *netdev = adapter->netdev;
6223         struct ixgbe_hw *hw = &adapter->hw;
6224         struct ixgbe_hw_stats *hwstats = &adapter->stats;
6225         u64 total_mpc = 0;
6226         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6227         u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6228         u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6229         u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6230
6231         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6232             test_bit(__IXGBE_RESETTING, &adapter->state))
6233                 return;
6234
6235         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6236                 u64 rsc_count = 0;
6237                 u64 rsc_flush = 0;
6238                 for (i = 0; i < adapter->num_rx_queues; i++) {
6239                         rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6240                         rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6241                 }
6242                 adapter->rsc_total_count = rsc_count;
6243                 adapter->rsc_total_flush = rsc_flush;
6244         }
6245
6246         for (i = 0; i < adapter->num_rx_queues; i++) {
6247                 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6248                 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6249                 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6250                 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6251                 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6252                 bytes += rx_ring->stats.bytes;
6253                 packets += rx_ring->stats.packets;
6254         }
6255         adapter->non_eop_descs = non_eop_descs;
6256         adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6257         adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6258         adapter->hw_csum_rx_error = hw_csum_rx_error;
6259         netdev->stats.rx_bytes = bytes;
6260         netdev->stats.rx_packets = packets;
6261
6262         bytes = 0;
6263         packets = 0;
6264         /* gather some stats to the adapter struct that are per queue */
6265         for (i = 0; i < adapter->num_tx_queues; i++) {
6266                 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6267                 restart_queue += tx_ring->tx_stats.restart_queue;
6268                 tx_busy += tx_ring->tx_stats.tx_busy;
6269                 bytes += tx_ring->stats.bytes;
6270                 packets += tx_ring->stats.packets;
6271         }
6272         adapter->restart_queue = restart_queue;
6273         adapter->tx_busy = tx_busy;
6274         netdev->stats.tx_bytes = bytes;
6275         netdev->stats.tx_packets = packets;
6276
6277         hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6278
6279         /* 8 register reads */
6280         for (i = 0; i < 8; i++) {
6281                 /* for packet buffers not used, the register should read 0 */
6282                 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6283                 missed_rx += mpc;
6284                 hwstats->mpc[i] += mpc;
6285                 total_mpc += hwstats->mpc[i];
6286                 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6287                 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6288                 switch (hw->mac.type) {
6289                 case ixgbe_mac_82598EB:
6290                         hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6291                         hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6292                         hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6293                         hwstats->pxonrxc[i] +=
6294                                 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6295                         break;
6296                 case ixgbe_mac_82599EB:
6297                 case ixgbe_mac_X540:
6298                 case ixgbe_mac_X550:
6299                 case ixgbe_mac_X550EM_x:
6300                         hwstats->pxonrxc[i] +=
6301                                 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6302                         break;
6303                 default:
6304                         break;
6305                 }
6306         }
6307
6308         /* 16 register reads */
6309         for (i = 0; i < 16; i++) {
6310                 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6311                 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6312                 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6313                     (hw->mac.type == ixgbe_mac_X540) ||
6314                     (hw->mac.type == ixgbe_mac_X550) ||
6315                     (hw->mac.type == ixgbe_mac_X550EM_x)) {
6316                         hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6317                         IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
6318                         hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6319                         IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
6320                 }
6321         }
6322
6323         hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6324         /* work around hardware counting issue */
6325         hwstats->gprc -= missed_rx;
6326
6327         ixgbe_update_xoff_received(adapter);
6328
6329         /* 82598 hardware only has a 32 bit counter in the high register */
6330         switch (hw->mac.type) {
6331         case ixgbe_mac_82598EB:
6332                 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6333                 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6334                 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6335                 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6336                 break;
6337         case ixgbe_mac_X540:
6338         case ixgbe_mac_X550:
6339         case ixgbe_mac_X550EM_x:
6340                 /* OS2BMC stats are X540 and later */
6341                 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6342                 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6343                 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6344                 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
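                /* fall through - the 82599 counters below apply here too */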
6345         case ixgbe_mac_82599EB:
6346                 for (i = 0; i < 16; i++)
6347                         adapter->hw_rx_no_dma_resources +=
6348                                              IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6349                 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6350                 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
6351                 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6352                 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
6353                 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6354                 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
6355                 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6356                 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6357                 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6358 #ifdef IXGBE_FCOE
6359                 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6360                 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6361                 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6362                 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6363                 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6364                 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6365                 /* Add up per-CPU counters for total DDP alloc failures */
6366                 if (adapter->fcoe.ddp_pool) {
6367                         struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6368                         struct ixgbe_fcoe_ddp_pool *ddp_pool;
6369                         unsigned int cpu;
6370                         u64 noddp = 0, noddp_ext_buff = 0;
6371                         for_each_possible_cpu(cpu) {
6372                                 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6373                                 noddp += ddp_pool->noddp;
6374                                 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6375                         }
6376                         hwstats->fcoe_noddp = noddp;
6377                         hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6378                 }
6379 #endif /* IXGBE_FCOE */
6380                 break;
6381         default:
6382                 break;
6383         }
6384         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6385         hwstats->bprc += bprc;
6386         hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6387         if (hw->mac.type == ixgbe_mac_82598EB)
6388                 hwstats->mprc -= bprc;
6389         hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6390         hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6391         hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6392         hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6393         hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6394         hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6395         hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6396         hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6397         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6398         hwstats->lxontxc += lxon;
6399         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6400         hwstats->lxofftxc += lxoff;
6401         hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6402         hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6403         /*
6404          * 82598 errata - tx of flow control packets is included in tx counters
6405          */
6406         xon_off_tot = lxon + lxoff;
6407         hwstats->gptc -= xon_off_tot;
6408         hwstats->mptc -= xon_off_tot;
6409         hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6410         hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6411         hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6412         hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6413         hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6414         hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6415         hwstats->ptc64 -= xon_off_tot;
6416         hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6417         hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6418         hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6419         hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6420         hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6421         hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6422
6423         /* Fill out the OS statistics structure */
6424         netdev->stats.multicast = hwstats->mprc;
6425
6426         /* Rx Errors */
6427         netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6428         netdev->stats.rx_dropped = 0;
6429         netdev->stats.rx_length_errors = hwstats->rlec;
6430         netdev->stats.rx_crc_errors = hwstats->crcerrs;
6431         netdev->stats.rx_missed_errors = total_mpc;
6432 }
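/*
 * Note on the reads above whose results are discarded: as the inline
 * "to clear" comments indicate, those registers are clear-on-read, so the
 * throwaway read of the high half (e.g. IXGBE_GORCH) resets the counter
 * pair before the next polling interval accumulates a fresh delta.
 */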
6433
6434 /**
6435  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
6436  * @adapter: pointer to the device adapter structure
6437  **/
6438 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6439 {
6440         struct ixgbe_hw *hw = &adapter->hw;
6441         int i;
6442
6443         if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6444                 return;
6445
6446         adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6447
6448         /* if interface is down do nothing */
6449         if (test_bit(__IXGBE_DOWN, &adapter->state))
6450                 return;
6451
6452         /* do nothing if we are not using signature filters */
6453         if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
6454                 return;
6455
6456         adapter->fdir_overflow++;
6457
6458         if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6459                 for (i = 0; i < adapter->num_tx_queues; i++)
6460                         set_bit(__IXGBE_TX_FDIR_INIT_DONE,
6461                                 &(adapter->tx_ring[i]->state));
6462                 /* re-enable flow director interrupts */
6463                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6464         } else {
6465                 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
6467         }
6468 }
6469
6470 /**
6471  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
6472  * @adapter: pointer to the device adapter structure
6473  *
6474  * This function serves two purposes.  First, it strobes the interrupt lines
6475  * to verify that interrupts are still being delivered.  Second, it sets the
6476  * bits needed to check for Tx hangs.  As a result we should immediately
6477  * determine if a hang has occurred.
6478  */
6479 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6480 {
6481         struct ixgbe_hw *hw = &adapter->hw;
6482         u64 eics = 0;
6483         int i;
6484
6485         /* If we're down, removing or resetting, just bail */
6486         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6487             test_bit(__IXGBE_REMOVING, &adapter->state) ||
6488             test_bit(__IXGBE_RESETTING, &adapter->state))
6489                 return;
6490
6491         /* Force detection of hung controller */
6492         if (netif_carrier_ok(adapter->netdev)) {
6493                 for (i = 0; i < adapter->num_tx_queues; i++)
6494                         set_check_for_tx_hang(adapter->tx_ring[i]);
6495         }
6496
6497         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6498                 /*
6499                  * for legacy and MSI interrupts don't set any bits
6500                  * that are enabled for EIAM, because this operation
6501                  * would set *both* EIMS and EICS for any bit in EIAM
6502                  */
6503                 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6504                         (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6505         } else {
6506                 /* get one bit for every active tx/rx interrupt vector */
6507                 for (i = 0; i < adapter->num_q_vectors; i++) {
6508                         struct ixgbe_q_vector *qv = adapter->q_vector[i];
6509                         if (qv->rx.ring || qv->tx.ring)
6510                                 eics |= ((u64)1 << i);
6511                 }
6512         }
6513
6514         /* Cause software interrupt to ensure rings are cleaned */
6515         ixgbe_irq_rearm_queues(adapter, eics);
6516 }
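/*
 * Example: in MSI-X mode with q_vectors 0-3 each owning at least one ring,
 * the loop above yields eics = 0xf, i.e. one EICS strobe bit per active
 * vector, which forces an interrupt so a stalled ring gets serviced.
 */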
6517
6518 /**
6519  * ixgbe_watchdog_update_link - update the link status
6520  * @adapter: pointer to the device adapter structure
6522  **/
6523 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6524 {
6525         struct ixgbe_hw *hw = &adapter->hw;
6526         u32 link_speed = adapter->link_speed;
6527         bool link_up = adapter->link_up;
6528         bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6529
6530         if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6531                 return;
6532
6533         if (hw->mac.ops.check_link) {
6534                 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6535         } else {
6536                 /* always assume link is up, if no check link function */
6537                 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6538                 link_up = true;
6539         }
6540
6541         if (adapter->ixgbe_ieee_pfc)
6542                 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6543
6544         if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6545                 hw->mac.ops.fc_enable(hw);
6546                 ixgbe_set_rx_drop_en(adapter);
6547         }
6548
6549         if (link_up ||
6550             time_after(jiffies, (adapter->link_check_timeout +
6551                                  IXGBE_TRY_LINK_TIMEOUT))) {
6552                 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6553                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6554                 IXGBE_WRITE_FLUSH(hw);
6555         }
6556
6557         adapter->link_up = link_up;
6558         adapter->link_speed = link_speed;
6559 }
6560
6561 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6562 {
6563 #ifdef CONFIG_IXGBE_DCB
6564         struct net_device *netdev = adapter->netdev;
6565         struct dcb_app app = {
6566                               .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6567                               .protocol = 0,
6568                              };
6569         u8 up = 0;
6570
6571         if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6572                 up = dcb_ieee_getapp_mask(netdev, &app);
6573
6574         adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
6575 #endif
6576 }
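/*
 * Example: if dcb_ieee_getapp_mask() reports up = 0x08 (only user priority 3
 * mapped for the default app entry), then ffs(0x08) == 4 and default_up
 * above becomes 3; an empty or UP0-only mask leaves default_up at 0.
 */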
6577
6578 /**
6579  * ixgbe_watchdog_link_is_up - update netif_carrier status and
6580  *                             print link up message
6581  * @adapter: pointer to the device adapter structure
6582  **/
6583 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6584 {
6585         struct net_device *netdev = adapter->netdev;
6586         struct ixgbe_hw *hw = &adapter->hw;
6587         struct net_device *upper;
6588         struct list_head *iter;
6589         u32 link_speed = adapter->link_speed;
6590         const char *speed_str;
6591         bool flow_rx, flow_tx;
6592
6593         /* only continue if link was previously down */
6594         if (netif_carrier_ok(netdev))
6595                 return;
6596
6597         adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6598
6599         switch (hw->mac.type) {
6600         case ixgbe_mac_82598EB: {
6601                 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6602                 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6603                 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6604                 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6605         }
6606                 break;
6607         case ixgbe_mac_X540:
6608         case ixgbe_mac_X550:
6609         case ixgbe_mac_X550EM_x:
6610         case ixgbe_mac_82599EB: {
6611                 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6612                 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6613                 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6614                 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6615         }
6616                 break;
6617         default:
6618                 flow_tx = false;
6619                 flow_rx = false;
6620                 break;
6621         }
6622
6623         adapter->last_rx_ptp_check = jiffies;
6624
6625         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6626                 ixgbe_ptp_start_cyclecounter(adapter);
6627
6628         switch (link_speed) {
6629         case IXGBE_LINK_SPEED_10GB_FULL:
6630                 speed_str = "10 Gbps";
6631                 break;
6632         case IXGBE_LINK_SPEED_2_5GB_FULL:
6633                 speed_str = "2.5 Gbps";
6634                 break;
6635         case IXGBE_LINK_SPEED_1GB_FULL:
6636                 speed_str = "1 Gbps";
6637                 break;
6638         case IXGBE_LINK_SPEED_100_FULL:
6639                 speed_str = "100 Mbps";
6640                 break;
6641         default:
6642                 speed_str = "unknown speed";
6643                 break;
6644         }
6645         e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
6646                ((flow_rx && flow_tx) ? "RX/TX" :
6647                (flow_rx ? "RX" :
6648                (flow_tx ? "TX" : "None"))));
6649
6650         netif_carrier_on(netdev);
6651         ixgbe_check_vf_rate_limit(adapter);
6652
6653         /* enable transmits */
6654         netif_tx_wake_all_queues(adapter->netdev);
6655
6656         /* enable any upper devices */
6657         rtnl_lock();
6658         netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6659                 if (netif_is_macvlan(upper)) {
6660                         struct macvlan_dev *vlan = netdev_priv(upper);
6661
6662                         if (vlan->fwd_priv)
6663                                 netif_tx_wake_all_queues(upper);
6664                 }
6665         }
6666         rtnl_unlock();
6667
6668         /* update the default user priority for VFs */
6669         ixgbe_update_default_up(adapter);
6670
6671         /* ping all the active vfs to let them know link has changed */
6672         ixgbe_ping_all_vfs(adapter);
6673 }
6674
6675 /**
6676  * ixgbe_watchdog_link_is_down - update netif_carrier status and
6677  *                               print link down message
6678  * @adapter: pointer to the adapter structure
6679  **/
6680 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6681 {
6682         struct net_device *netdev = adapter->netdev;
6683         struct ixgbe_hw *hw = &adapter->hw;
6684
6685         adapter->link_up = false;
6686         adapter->link_speed = 0;
6687
6688         /* only continue if link was up previously */
6689         if (!netif_carrier_ok(netdev))
6690                 return;
6691
6692         /* poll for SFP+ cable when link is down */
6693         if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6694                 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6695
6696         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6697                 ixgbe_ptp_start_cyclecounter(adapter);
6698
6699         e_info(drv, "NIC Link is Down\n");
6700         netif_carrier_off(netdev);
6701
6702         /* ping all the active vfs to let them know link has changed */
6703         ixgbe_ping_all_vfs(adapter);
6704 }
6705
6706 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6707 {
6708         int i;
6709
6710         for (i = 0; i < adapter->num_tx_queues; i++) {
6711                 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6712
6713                 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6714                         return true;
6715         }
6716
6717         return false;
6718 }
6719
6720 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6721 {
6722         struct ixgbe_hw *hw = &adapter->hw;
6723         struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
6724         u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
6725
6726         int i, j;
6727
6728         if (!adapter->num_vfs)
6729                 return false;
6730
6731         /* resetting the PF is only needed for MAC before X550 */
6732         if (hw->mac.type >= ixgbe_mac_X550)
6733                 return false;
6734
6735         for (i = 0; i < adapter->num_vfs; i++) {
6736                 for (j = 0; j < q_per_pool; j++) {
6737                         u32 h, t;
6738
6739                         h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6740                         t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6741
6742                         if (h != t)
6743                                 return true;
6744                 }
6745         }
6746
6747         return false;
6748 }
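/*
 * Worked example for q_per_pool: __ALIGN_MASK(x, mask) expands to
 * ((x + mask) & ~mask), so __ALIGN_MASK(1, ~vmdq->mask) evaluates to
 * (1 + ~mask) & mask.  With a hypothetical VMDq mask of 0x78 this gives
 * (1 + 0xffffff87) & 0x78 = 0x8, i.e. eight Tx queues checked per pool.
 */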
6749
6750 /**
6751  * ixgbe_watchdog_flush_tx - flush queues on link down
6752  * @adapter: pointer to the device adapter structure
6753  **/
6754 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6755 {
6756         if (!netif_carrier_ok(adapter->netdev)) {
6757                 if (ixgbe_ring_tx_pending(adapter) ||
6758                     ixgbe_vf_tx_pending(adapter)) {
6759                         /* We've lost link, so the controller stops DMA,
6760                          * but we've got queued Tx work that's never going
6761                          * to get done, so reset controller to flush Tx.
6762                          * (Do the reset outside of interrupt context).
6763                          */
6764                         e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6765                         adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6766                 }
6767         }
6768 }
6769
6770 #ifdef CONFIG_PCI_IOV
6771 static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
6772                                       struct pci_dev *vfdev)
6773 {
6774         if (!pci_wait_for_pending_transaction(vfdev))
6775                 e_dev_warn("Issuing VFLR with pending transactions\n");
6776
6777         e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
6778         pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
6779
6780         msleep(100);
6781 }
6782
6783 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6784 {
6785         struct ixgbe_hw *hw = &adapter->hw;
6786         struct pci_dev *pdev = adapter->pdev;
6787         unsigned int vf;
6788         u32 gpc;
6789
6790         if (!(netif_carrier_ok(adapter->netdev)))
6791                 return;
6792
6793         gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
6794         if (gpc) /* If incrementing then no need for the check below */
6795                 return;
6796         /* Check to see if a bad DMA write target from an errant or
6797          * malicious VF has caused a PCIe error.  If so then we can
6798          * issue a VFLR to the offending VF(s) and then resume without
6799          * requesting a full slot reset.
6800          */
6801
6802         if (!pdev)
6803                 return;
6804
6805         /* check status reg for all VFs owned by this PF */
6806         for (vf = 0; vf < adapter->num_vfs; ++vf) {
6807                 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
6808                 u16 status_reg;
6809
6810                 if (!vfdev)
6811                         continue;
6812                 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
6813                 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
6814                     status_reg & PCI_STATUS_REC_MASTER_ABORT)
6815                         ixgbe_issue_vf_flr(adapter, vfdev);
6816         }
6817 }
6818
6819 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6820 {
6821         u32 ssvpc;
6822
6823         /* Do not perform spoof check for 82598 or if not in IOV mode */
6824         if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
6825             adapter->num_vfs == 0)
6826                 return;
6827
6828         ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6829
6830         /*
6831          * The SSVPC register is cleared on read; if it reads zero, no
6832          * spoofed packets were seen in the last interval.
6833          */
6834         if (!ssvpc)
6835                 return;
6836
6837         e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6838 }
6839 #else
6840 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
6841 {
6842 }
6843
6844 static void
6845 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
6846 {
6847 }
6848 #endif /* CONFIG_PCI_IOV */
6849
6850
6851 /**
6852  * ixgbe_watchdog_subtask - check and bring link up
6853  * @adapter: pointer to the device adapter structure
6854  **/
6855 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6856 {
6857         /* if interface is down, removing or resetting, do nothing */
6858         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6859             test_bit(__IXGBE_REMOVING, &adapter->state) ||
6860             test_bit(__IXGBE_RESETTING, &adapter->state))
6861                 return;
6862
6863         ixgbe_watchdog_update_link(adapter);
6864
6865         if (adapter->link_up)
6866                 ixgbe_watchdog_link_is_up(adapter);
6867         else
6868                 ixgbe_watchdog_link_is_down(adapter);
6869
6870         ixgbe_check_for_bad_vf(adapter);
6871         ixgbe_spoof_check(adapter);
6872         ixgbe_update_stats(adapter);
6873
6874         ixgbe_watchdog_flush_tx(adapter);
6875 }
6876
6877 /**
6878  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
6879  * @adapter: the ixgbe adapter structure
6880  **/
6881 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6882 {
6883         struct ixgbe_hw *hw = &adapter->hw;
6884         s32 err;
6885
6886         /* not searching for SFP so there is nothing to do here */
6887         if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6888             !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6889                 return;
6890
6891         if (adapter->sfp_poll_time &&
6892             time_after(adapter->sfp_poll_time, jiffies))
6893                 return; /* If not yet time to poll for SFP */
6894
6895         /* someone else is in init, wait until next service event */
6896         if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6897                 return;
6898
6899         adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
6900
6901         err = hw->phy.ops.identify_sfp(hw);
6902         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6903                 goto sfp_out;
6904
6905         if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6906                 /* If no cable is present, then we need to reset
6907                  * the next time we find a good cable. */
6908                 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6909         }
6910
6911         /* exit on error */
6912         if (err)
6913                 goto sfp_out;
6914
6915         /* exit if reset not needed */
6916         if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6917                 goto sfp_out;
6918
6919         adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6920
6921         /*
6922          * A module may be identified correctly, but the EEPROM may not have
6923          * support for that module.  setup_sfp() will fail in that case, so
6924          * we should not allow that module to load.
6925          */
6926         if (hw->mac.type == ixgbe_mac_82598EB)
6927                 err = hw->phy.ops.reset(hw);
6928         else
6929                 err = hw->mac.ops.setup_sfp(hw);
6930
6931         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6932                 goto sfp_out;
6933
6934         adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6935         e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6936
6937 sfp_out:
6938         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6939
6940         if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6941             (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6942                 e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
6943                 e_dev_err("Reload the driver after installing a supported module.\n");
6946                 unregister_netdev(adapter->netdev);
6947         }
6948 }
6949
6950 /**
6951  * ixgbe_sfp_link_config_subtask - set up link after SFP module install
6952  * @adapter: the ixgbe adapter structure
6953  **/
6954 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6955 {
6956         struct ixgbe_hw *hw = &adapter->hw;
6957         u32 speed;
6958         bool autoneg = false;
6959
6960         if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6961                 return;
6962
6963         /* someone else is in init, wait until next service event */
6964         if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6965                 return;
6966
6967         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6968
6969         speed = hw->phy.autoneg_advertised;
6970         if (!speed && hw->mac.ops.get_link_capabilities) {
6971                 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6972
6973                 /* setup the highest link when no autoneg */
6974                 if (!autoneg) {
6975                         if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6976                                 speed = IXGBE_LINK_SPEED_10GB_FULL;
6977                 }
6978         }
6979
6980         if (hw->mac.ops.setup_link)
6981                 hw->mac.ops.setup_link(hw, speed, true);
6982
6983         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6984         adapter->link_check_timeout = jiffies;
6985         clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6986 }
6987
6988 /**
6989  * ixgbe_service_timer - Timer Call-back
6990  * @data: pointer to adapter cast into an unsigned long
6991  **/
6992 static void ixgbe_service_timer(unsigned long data)
6993 {
6994         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6995         unsigned long next_event_offset;
6996
6997         /* poll faster when waiting for link */
6998         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6999                 next_event_offset = HZ / 10;
7000         else
7001                 next_event_offset = HZ * 2;
7002
7003         /* Reset the timer */
7004         mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7005
7006         ixgbe_service_event_schedule(adapter);
7007 }
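/*
 * Example: HZ / 10 is 100 ms and HZ * 2 is 2 s regardless of the kernel's HZ
 * setting, so the service task runs at 10 Hz while a link update is pending
 * and at 0.5 Hz otherwise.
 */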
7008
7009 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7010 {
7011         struct ixgbe_hw *hw = &adapter->hw;
7012         u32 status;
7013
7014         if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7015                 return;
7016
7017         adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7018
7019         if (!hw->phy.ops.handle_lasi)
7020                 return;
7021
7022         status = hw->phy.ops.handle_lasi(&adapter->hw);
7023         if (status != IXGBE_ERR_OVERTEMP)
7024                 return;
7025
7026         e_crit(drv, "%s\n", ixgbe_overheat_msg);
7027 }
7028
7029 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7030 {
7031         if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
7032                 return;
7033
7034         adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
7035
7036         /* If we're already down, removing or resetting, just bail */
7037         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7038             test_bit(__IXGBE_REMOVING, &adapter->state) ||
7039             test_bit(__IXGBE_RESETTING, &adapter->state))
7040                 return;
7041
7042         ixgbe_dump(adapter);
7043         netdev_err(adapter->netdev, "Reset adapter\n");
7044         adapter->tx_timeout_count++;
7045
7046         rtnl_lock();
7047         ixgbe_reinit_locked(adapter);
7048         rtnl_unlock();
7049 }
7050
7051 /**
7052  * ixgbe_service_task - manages and runs subtasks
7053  * @work: pointer to work_struct containing our data
7054  **/
7055 static void ixgbe_service_task(struct work_struct *work)
7056 {
7057         struct ixgbe_adapter *adapter = container_of(work,
7058                                                      struct ixgbe_adapter,
7059                                                      service_task);
7060         if (ixgbe_removed(adapter->hw.hw_addr)) {
7061                 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7062                         rtnl_lock();
7063                         ixgbe_down(adapter);
7064                         rtnl_unlock();
7065                 }
7066                 ixgbe_service_event_complete(adapter);
7067                 return;
7068         }
7069 #ifdef CONFIG_IXGBE_VXLAN
7070         if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
7071                 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
7072                 vxlan_get_rx_port(adapter->netdev);
7073         }
7074 #endif /* CONFIG_IXGBE_VXLAN */
7075         ixgbe_reset_subtask(adapter);
7076         ixgbe_phy_interrupt_subtask(adapter);
7077         ixgbe_sfp_detection_subtask(adapter);
7078         ixgbe_sfp_link_config_subtask(adapter);
7079         ixgbe_check_overtemp_subtask(adapter);
7080         ixgbe_watchdog_subtask(adapter);
7081         ixgbe_fdir_reinit_subtask(adapter);
7082         ixgbe_check_hang_subtask(adapter);
7083
7084         if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7085                 ixgbe_ptp_overflow_check(adapter);
7086                 ixgbe_ptp_rx_hang(adapter);
7087         }
7088
7089         ixgbe_service_event_complete(adapter);
7090 }
7091
7092 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7093                      struct ixgbe_tx_buffer *first,
7094                      u8 *hdr_len)
7095 {
7096         struct sk_buff *skb = first->skb;
7097         u32 vlan_macip_lens, type_tucmd;
7098         u32 mss_l4len_idx, l4len;
7099         int err;
7100
7101         if (skb->ip_summed != CHECKSUM_PARTIAL)
7102                 return 0;
7103
7104         if (!skb_is_gso(skb))
7105                 return 0;
7106
7107         err = skb_cow_head(skb, 0);
7108         if (err < 0)
7109                 return err;
7110
7111         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
7112         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7113
7114         if (first->protocol == htons(ETH_P_IP)) {
7115                 struct iphdr *iph = ip_hdr(skb);
7116                 iph->tot_len = 0;
7117                 iph->check = 0;
7118                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7119                                                          iph->daddr, 0,
7120                                                          IPPROTO_TCP,
7121                                                          0);
7122                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7123                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7124                                    IXGBE_TX_FLAGS_CSUM |
7125                                    IXGBE_TX_FLAGS_IPV4;
7126         } else if (skb_is_gso_v6(skb)) {
7127                 ipv6_hdr(skb)->payload_len = 0;
7128                 tcp_hdr(skb)->check =
7129                     ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
7130                                      &ipv6_hdr(skb)->daddr,
7131                                      0, IPPROTO_TCP, 0);
7132                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7133                                    IXGBE_TX_FLAGS_CSUM;
7134         }
7135
7136         /* compute header lengths */
7137         l4len = tcp_hdrlen(skb);
7138         *hdr_len = skb_transport_offset(skb) + l4len;
7139
7140         /* update gso size and bytecount with header size */
7141         first->gso_segs = skb_shinfo(skb)->gso_segs;
7142         first->bytecount += (first->gso_segs - 1) * *hdr_len;
7143
7144         /* mss_l4len_idx: use 0 as index for TSO */
7145         mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
7146         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7147
7148         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
7149         vlan_macip_lens = skb_network_header_len(skb);
7150         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7151         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7152
7153         ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
7154                           mss_l4len_idx);
7155
7156         return 1;
7157 }
7158
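/* Worked example for ixgbe_tso() above (assumed frame, not taken from
 * the source): a TCP/IPv4 TSO skb with a 20-byte TCP header and a
 * gso_size of 1448 produces
 *
 *	l4len         = 20;
 *	*hdr_len      = skb_transport_offset(skb) + 20;
 *	mss_l4len_idx = (20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 *			(1448 << IXGBE_ADVTXD_MSS_SHIFT);
 *
 * telling the hardware how many header bytes to replicate per segment
 * and how large each segment payload may be.
 */
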
7159 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7160                           struct ixgbe_tx_buffer *first)
7161 {
7162         struct sk_buff *skb = first->skb;
7163         u32 vlan_macip_lens = 0;
7164         u32 mss_l4len_idx = 0;
7165         u32 type_tucmd = 0;
7166
7167         if (skb->ip_summed != CHECKSUM_PARTIAL) {
7168                 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
7169                     !(first->tx_flags & IXGBE_TX_FLAGS_CC))
7170                         return;
7171                 vlan_macip_lens = skb_network_offset(skb) <<
7172                                   IXGBE_ADVTXD_MACLEN_SHIFT;
7173         } else {
7174                 u8 l4_hdr = 0;
7175                 union {
7176                         struct iphdr *ipv4;
7177                         struct ipv6hdr *ipv6;
7178                         u8 *raw;
7179                 } network_hdr;
7180                 union {
7181                         struct tcphdr *tcphdr;
7182                         u8 *raw;
7183                 } transport_hdr;
7184                 __be16 frag_off;
7185
7186                 if (skb->encapsulation) {
7187                         network_hdr.raw = skb_inner_network_header(skb);
7188                         transport_hdr.raw = skb_inner_transport_header(skb);
7189                         vlan_macip_lens = skb_inner_network_offset(skb) <<
7190                                           IXGBE_ADVTXD_MACLEN_SHIFT;
7191                 } else {
7192                         network_hdr.raw = skb_network_header(skb);
7193                         transport_hdr.raw = skb_transport_header(skb);
7194                         vlan_macip_lens = skb_network_offset(skb) <<
7195                                           IXGBE_ADVTXD_MACLEN_SHIFT;
7196                 }
7197
7198                 /* use the first 4 bits to determine the IP version */
7199                 switch (network_hdr.ipv4->version) {
7200                 case IPVERSION:
7201                         vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7202                         type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7203                         l4_hdr = network_hdr.ipv4->protocol;
7204                         break;
7205                 case 6:
7206                         vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7207                         l4_hdr = network_hdr.ipv6->nexthdr;
7208                         if (likely((transport_hdr.raw - network_hdr.raw) ==
7209                                    sizeof(struct ipv6hdr)))
7210                                 break;
7211                         ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
7212                                               sizeof(struct ipv6hdr),
7213                                          &l4_hdr, &frag_off);
7214                         if (unlikely(frag_off))
7215                                 l4_hdr = NEXTHDR_FRAGMENT;
7216                         break;
7217                 default:
7218                         break;
7219                 }
7220
7221                 switch (l4_hdr) {
7222                 case IPPROTO_TCP:
7223                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
7224                         mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
7225                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7226                         break;
7227                 case IPPROTO_SCTP:
7228                         type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7229                         mss_l4len_idx = sizeof(struct sctphdr) <<
7230                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7231                         break;
7232                 case IPPROTO_UDP:
7233                         mss_l4len_idx = sizeof(struct udphdr) <<
7234                                         IXGBE_ADVTXD_L4LEN_SHIFT;
7235                         break;
7236                 default:
7237                         if (unlikely(net_ratelimit())) {
7238                                 dev_warn(tx_ring->dev,
7239                                          "partial checksum, version=%d, l4 proto=%x\n",
7240                                          network_hdr.ipv4->version, l4_hdr);
7241                         }
7242                         skb_checksum_help(skb);
7243                         goto no_csum;
7244                 }
7245
7246                 /* update TX checksum flag */
7247                 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7248         }
7249
7250 no_csum:
7251         /* vlan_macip_lens: MACLEN, VLAN tag */
7252         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7253
7254         ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
7255                           type_tucmd, mss_l4len_idx);
7256 }
7257
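/* Field layout example for ixgbe_tx_csum() above (assumed sizes): an
 * untagged TCP/IPv4 frame with a 14-byte Ethernet header and a 20-byte
 * IP header yields
 *
 *	vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *
 * i.e. the IP header length sits in the low bits, MACLEN above it, and
 * any VLAN tag is OR'd in from first->tx_flags.
 */
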
7258 #define IXGBE_SET_FLAG(_input, _flag, _result) \
7259         ((_flag <= _result) ? \
7260          ((u32)(_input & _flag) * (_result / _flag)) : \
7261          ((u32)(_input & _flag) / (_flag / _result)))
7262
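/* Expansion sketch for IXGBE_SET_FLAG() above, using hypothetical masks
 * _flag = 0x02 and _result = 0x10 (both compile-time constants, so the
 * multiply/divide folds to a shift):
 *
 *	IXGBE_SET_FLAG(x, 0x02, 0x10)
 *		=> (u32)(x & 0x02) * (0x10 / 0x02)
 *		=> (x & 0x02) * 8
 *
 * i.e. bit 1 of x is repositioned to bit 4 of the result; the divide
 * branch handles flags that sit above their result bit.
 */
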
7263 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7264 {
7265         /* set type for advanced descriptor with frame checksum insertion */
7266         u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7267                        IXGBE_ADVTXD_DCMD_DEXT |
7268                        IXGBE_ADVTXD_DCMD_IFCS;
7269
7270         /* set HW vlan bit if vlan is present */
7271         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7272                                    IXGBE_ADVTXD_DCMD_VLE);
7273
7274         /* set segmentation enable bits for TSO/FSO */
7275         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7276                                    IXGBE_ADVTXD_DCMD_TSE);
7277
7278         /* set timestamp bit if present */
7279         cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7280                                    IXGBE_ADVTXD_MAC_TSTAMP);
7281
7282         /* clear the frame checksum insertion bit when skb->no_fcs is set */
7283         cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7284
7285         return cmd_type;
7286 }
7287
7288 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7289                                    u32 tx_flags, unsigned int paylen)
7290 {
7291         u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7292
7293         /* enable L4 checksum for TSO and TX checksum offload */
7294         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7295                                         IXGBE_TX_FLAGS_CSUM,
7296                                         IXGBE_ADVTXD_POPTS_TXSM);
7297
7298         /* enable IPv4 checksum for TSO */
7299         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7300                                         IXGBE_TX_FLAGS_IPV4,
7301                                         IXGBE_ADVTXD_POPTS_IXSM);
7302
7303         /*
7304          * Check Context must be set if Tx switch is enabled, which it
7305          * always is when virtual functions are running
7306          */
7307         olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7308                                         IXGBE_TX_FLAGS_CC,
7309                                         IXGBE_ADVTXD_CC);
7310
7311         tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7312 }
7313
7314 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7315 {
7316         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7317
7318         /* Herbert's original patch had:
7319          *  smp_mb__after_netif_stop_queue();
7320          * but since that doesn't exist yet, just open code it.
7321          */
7322         smp_mb();
7323
7324         /* We need to check again in case another CPU has just
7325          * made room available.
7326          */
7327         if (likely(ixgbe_desc_unused(tx_ring) < size))
7328                 return -EBUSY;
7329
7330         /* A reprieve! - use start_queue because it doesn't call schedule */
7331         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7332         ++tx_ring->tx_stats.restart_queue;
7333         return 0;
7334 }
7335
7336 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7337 {
7338         if (likely(ixgbe_desc_unused(tx_ring) >= size))
7339                 return 0;
7340
7341         return __ixgbe_maybe_stop_tx(tx_ring, size);
7342 }
7343
7344 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7345                        IXGBE_TXD_CMD_RS)
7346
7347 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7348                          struct ixgbe_tx_buffer *first,
7349                          const u8 hdr_len)
7350 {
7351         struct sk_buff *skb = first->skb;
7352         struct ixgbe_tx_buffer *tx_buffer;
7353         union ixgbe_adv_tx_desc *tx_desc;
7354         struct skb_frag_struct *frag;
7355         dma_addr_t dma;
7356         unsigned int data_len, size;
7357         u32 tx_flags = first->tx_flags;
7358         u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7359         u16 i = tx_ring->next_to_use;
7360
7361         tx_desc = IXGBE_TX_DESC(tx_ring, i);
7362
7363         ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7364
7365         size = skb_headlen(skb);
7366         data_len = skb->data_len;
7367
7368 #ifdef IXGBE_FCOE
7369         if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7370                 if (data_len < sizeof(struct fcoe_crc_eof)) {
7371                         size -= sizeof(struct fcoe_crc_eof) - data_len;
7372                         data_len = 0;
7373                 } else {
7374                         data_len -= sizeof(struct fcoe_crc_eof);
7375                 }
7376         }
7377
7378 #endif
7379         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7380
7381         tx_buffer = first;
7382
7383         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7384                 if (dma_mapping_error(tx_ring->dev, dma))
7385                         goto dma_error;
7386
7387                 /* record length, and DMA address */
7388                 dma_unmap_len_set(tx_buffer, len, size);
7389                 dma_unmap_addr_set(tx_buffer, dma, dma);
7390
7391                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7392
7393                 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7394                         tx_desc->read.cmd_type_len =
7395                                 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7396
7397                         i++;
7398                         tx_desc++;
7399                         if (i == tx_ring->count) {
7400                                 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7401                                 i = 0;
7402                         }
7403                         tx_desc->read.olinfo_status = 0;
7404
7405                         dma += IXGBE_MAX_DATA_PER_TXD;
7406                         size -= IXGBE_MAX_DATA_PER_TXD;
7407
7408                         tx_desc->read.buffer_addr = cpu_to_le64(dma);
7409                 }
7410
7411                 if (likely(!data_len))
7412                         break;
7413
7414                 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7415
7416                 i++;
7417                 tx_desc++;
7418                 if (i == tx_ring->count) {
7419                         tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7420                         i = 0;
7421                 }
7422                 tx_desc->read.olinfo_status = 0;
7423
7424 #ifdef IXGBE_FCOE
7425                 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7426 #else
7427                 size = skb_frag_size(frag);
7428 #endif
7429                 data_len -= size;
7430
7431                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7432                                        DMA_TO_DEVICE);
7433
7434                 tx_buffer = &tx_ring->tx_buffer_info[i];
7435         }
7436
7437         /* write last descriptor with RS and EOP bits */
7438         cmd_type |= size | IXGBE_TXD_CMD;
7439         tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7440
7441         netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7442
7443         /* set the timestamp */
7444         first->time_stamp = jiffies;
7445
7446         /*
7447          * Force memory writes to complete before letting h/w know there
7448          * are new descriptors to fetch.  (Only applicable for weak-ordered
7449          * memory model archs, such as IA-64).
7450          *
7451          * We also need this memory barrier to make certain all of the
7452          * status bits have been updated before next_to_watch is written.
7453          */
7454         wmb();
7455
7456         /* set next_to_watch value indicating a packet is present */
7457         first->next_to_watch = tx_desc;
7458
7459         i++;
7460         if (i == tx_ring->count)
7461                 i = 0;
7462
7463         tx_ring->next_to_use = i;
7464
7465         ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7466
7467         if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7468                 writel(i, tx_ring->tail);
7469
7470                 /* we need this if more than one processor can write to our tail
7471                  * at a time; it synchronizes IO on IA64/Altix systems
7472                  */
7473                 mmiowb();
7474         }
7475
7476         return;
7477 dma_error:
7478         dev_err(tx_ring->dev, "TX DMA map failed\n");
7479
7480         /* clear dma mappings for failed tx_buffer_info map */
7481         for (;;) {
7482                 tx_buffer = &tx_ring->tx_buffer_info[i];
7483                 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
7484                 if (tx_buffer == first)
7485                         break;
7486                 if (i == 0)
7487                         i = tx_ring->count;
7488                 i--;
7489         }
7490
7491         tx_ring->next_to_use = i;
7492 }
7493
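/* Descriptor walk sketch for ixgbe_tx_map() above (assumed skb shape):
 * a 256-byte linear area plus two page fragments consumes three
 * descriptors,
 *
 *	desc[i + 0]: head DMA address,   len = 256
 *	desc[i + 1]: frag 0 DMA address, len = skb_frag_size(frag 0)
 *	desc[i + 2]: frag 1 DMA address, len | IXGBE_TXD_CMD (EOP | RS)
 *
 * and only the last one carries the EOP/RS bits; buffers larger than
 * IXGBE_MAX_DATA_PER_TXD are split across extra descriptors by the
 * inner while loop.
 */
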
7494 static void ixgbe_atr(struct ixgbe_ring *ring,
7495                       struct ixgbe_tx_buffer *first)
7496 {
7497         struct ixgbe_q_vector *q_vector = ring->q_vector;
7498         union ixgbe_atr_hash_dword input = { .dword = 0 };
7499         union ixgbe_atr_hash_dword common = { .dword = 0 };
7500         union {
7501                 unsigned char *network;
7502                 struct iphdr *ipv4;
7503                 struct ipv6hdr *ipv6;
7504         } hdr;
7505         struct tcphdr *th;
7506         struct sk_buff *skb;
7507 #ifdef CONFIG_IXGBE_VXLAN
7508         bool encap = false;
7509 #endif /* CONFIG_IXGBE_VXLAN */
7510         __be16 vlan_id;
7511
7512         /* if ring doesn't have an interrupt vector, cannot perform ATR */
7513         if (!q_vector)
7514                 return;
7515
7516         /* do nothing if sampling is disabled */
7517         if (!ring->atr_sample_rate)
7518                 return;
7519
7520         ring->atr_count++;
7521
7522         /* snag network header to get L4 type and address */
7523         skb = first->skb;
7524         hdr.network = skb_network_header(skb);
7525         if (skb->encapsulation) {
7526 #ifdef CONFIG_IXGBE_VXLAN
7527                 struct ixgbe_adapter *adapter = q_vector->adapter;
7528
7529                 if (!adapter->vxlan_port)
7530                         return;
7531                 if (first->protocol != htons(ETH_P_IP) ||
7532                     hdr.ipv4->version != IPVERSION ||
7533                     hdr.ipv4->protocol != IPPROTO_UDP) {
7534                         return;
7535                 }
7536                 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7537                         return;
7538                 encap = true;
7539                 hdr.network = skb_inner_network_header(skb);
7540                 th = inner_tcp_hdr(skb);
7541 #else
7542                 return;
7543 #endif /* CONFIG_IXGBE_VXLAN */
7544         } else {
7545                 /* Currently only IPv4/IPv6 with TCP is supported */
7546                 if ((first->protocol != htons(ETH_P_IPV6) ||
7547                      hdr.ipv6->nexthdr != IPPROTO_TCP) &&
7548                     (first->protocol != htons(ETH_P_IP) ||
7549                      hdr.ipv4->protocol != IPPROTO_TCP))
7550                         return;
7551                 th = tcp_hdr(skb);
7552         }
7553
7554         /* skip this packet since it is invalid or the socket is closing */
7555         if (!th || th->fin)
7556                 return;
7557
7558         /* sample on all syn packets or once every atr sample count */
7559         if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7560                 return;
7561
7562         /* reset sample count */
7563         ring->atr_count = 0;
7564
7565         vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
7566
7567         /*
7568          * src and dst are inverted; think of how the receiver sees them
7569          *
7570          * The input is broken into two sections: a non-compressed section
7571          * containing vm_pool, vlan_id, and flow_type.  The rest of the data
7572          * is XORed together and stored in the compressed dword.
7573          */
7574         input.formatted.vlan_id = vlan_id;
7575
7576         /*
7577          * since src port and flex bytes occupy the same word XOR them together
7578          * and write the value to source port portion of compressed dword
7579          */
7580         if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7581                 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7582         else
7583                 common.port.src ^= th->dest ^ first->protocol;
7584         common.port.dst ^= th->source;
7585
7586         if (first->protocol == htons(ETH_P_IP)) {
7587                 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7588                 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7589         } else {
7590                 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7591                 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7592                              hdr.ipv6->saddr.s6_addr32[1] ^
7593                              hdr.ipv6->saddr.s6_addr32[2] ^
7594                              hdr.ipv6->saddr.s6_addr32[3] ^
7595                              hdr.ipv6->daddr.s6_addr32[0] ^
7596                              hdr.ipv6->daddr.s6_addr32[1] ^
7597                              hdr.ipv6->daddr.s6_addr32[2] ^
7598                              hdr.ipv6->daddr.s6_addr32[3];
7599         }
7600
7601 #ifdef CONFIG_IXGBE_VXLAN
7602         if (encap)
7603                 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7604 #endif /* CONFIG_IXGBE_VXLAN */
7605
7606         /* This assumes the Rx queue and Tx queue are bound to the same CPU */
7607         ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7608                                               input, common, ring->queue_index);
7609 }
7610
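/* Signature input example for ixgbe_atr() above (assumed addresses): an
 * untagged TCP/IPv4 frame sent from 10.0.0.1:5001 to 10.0.0.2:80 hashes
 *
 *	common.port.src = htons(80) ^ htons(ETH_P_IP);
 *	common.port.dst = htons(5001);
 *	common.ip       = saddr ^ daddr;
 *
 * with src and dst deliberately swapped so the programmed filter
 * matches the return traffic as the receive side sees it.
 */
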
7611 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7612                               void *accel_priv, select_queue_fallback_t fallback)
7613 {
7614         struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7615 #ifdef IXGBE_FCOE
7616         struct ixgbe_adapter *adapter;
7617         struct ixgbe_ring_feature *f;
7618         int txq;
7619 #endif
7620
7621         if (fwd_adapter)
7622                 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7623
7624 #ifdef IXGBE_FCOE
7625
7626         /*
7627          * only execute the code below if protocol is FCoE
7628          * or FIP and we have FCoE enabled on the adapter
7629          */
7630         switch (vlan_get_protocol(skb)) {
7631         case htons(ETH_P_FCOE):
7632         case htons(ETH_P_FIP):
7633                 adapter = netdev_priv(dev);
7634
7635                 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7636                         break;
7637         default:
7638                 return fallback(dev, skb);
7639         }
7640
7641         f = &adapter->ring_feature[RING_F_FCOE];
7642
7643         txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
7644                                            smp_processor_id();
7645
7646         while (txq >= f->indices)
7647                 txq -= f->indices;
7648
7649         return txq + f->offset;
7650 #else
7651         return fallback(dev, skb);
7652 #endif
7653 }
7654
7655 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7656                           struct ixgbe_adapter *adapter,
7657                           struct ixgbe_ring *tx_ring)
7658 {
7659         struct ixgbe_tx_buffer *first;
7660         int tso;
7661         u32 tx_flags = 0;
7662         unsigned short f;
7663         u16 count = TXD_USE_COUNT(skb_headlen(skb));
7664         __be16 protocol = skb->protocol;
7665         u8 hdr_len = 0;
7666
7667         /*
7668          * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
7669          *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
7670          *       + 2 desc gap to keep tail from touching head,
7671          *       + 1 desc for context descriptor,
7672          * otherwise try next time
7673          */
7674         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
7675                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
7676
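        /* Budget example (assumed sizes): a 512-byte linear area plus
         * three 4 KB fragments gives count = 1 + 3, and the check below
         * requires count + 3 free descriptors: +1 for the context
         * descriptor and +2 of gap so tail never touches head.
         */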
7677         if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
7678                 tx_ring->tx_stats.tx_busy++;
7679                 return NETDEV_TX_BUSY;
7680         }
7681
7682         /* record the location of the first descriptor for this packet */
7683         first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
7684         first->skb = skb;
7685         first->bytecount = skb->len;
7686         first->gso_segs = 1;
7687
7688         /* if we have a HW VLAN tag being added, default to the HW one */
7689         if (skb_vlan_tag_present(skb)) {
7690                 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7691                 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7692         /* else if it is a SW VLAN check the next protocol and store the tag */
7693         } else if (protocol == htons(ETH_P_8021Q)) {
7694                 struct vlan_hdr *vhdr, _vhdr;
7695                 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
7696                 if (!vhdr)
7697                         goto out_drop;
7698
7699                 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7700                                   IXGBE_TX_FLAGS_VLAN_SHIFT;
7701                 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7702         }
7703         protocol = vlan_get_protocol(skb);
7704
7705         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7706             adapter->ptp_clock &&
7707             !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
7708                                    &adapter->state)) {
7709                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7710                 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
7711
7712                 /* schedule check for Tx timestamp */
7713                 adapter->ptp_tx_skb = skb_get(skb);
7714                 adapter->ptp_tx_start = jiffies;
7715                 schedule_work(&adapter->ptp_tx_work);
7716         }
7717
7718         skb_tx_timestamp(skb);
7719
7720 #ifdef CONFIG_PCI_IOV
7721         /*
7722          * The internal DMA Tx switch is active whenever SR-IOV is
7723          * enabled, so request the Check Context bit on every frame.
7724          */
7725         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7726                 tx_flags |= IXGBE_TX_FLAGS_CC;
7727
7728 #endif
7729         /* DCB maps skb priorities 0-7 onto the 3-bit PCP of the VLAN tag. */
7730         if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7731             ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
7732              (skb->priority != TC_PRIO_CONTROL))) {
7733                 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
7734                 tx_flags |= (skb->priority & 0x7) <<
7735                                         IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7736                 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7737                         struct vlan_ethhdr *vhdr;
7738
7739                         if (skb_cow_head(skb, 0))
7740                                 goto out_drop;
7741                         vhdr = (struct vlan_ethhdr *)skb->data;
7742                         vhdr->h_vlan_TCI = htons(tx_flags >>
7743                                                  IXGBE_TX_FLAGS_VLAN_SHIFT);
7744                 } else {
7745                         tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7746                 }
7747         }
7748
7749         /* record initial flags and protocol */
7750         first->tx_flags = tx_flags;
7751         first->protocol = protocol;
7752
7753 #ifdef IXGBE_FCOE
7754         /* setup tx offload for FCoE */
7755         if ((protocol == htons(ETH_P_FCOE)) &&
7756             (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7757                 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7758                 if (tso < 0)
7759                         goto out_drop;
7760
7761                 goto xmit_fcoe;
7762         }
7763
7764 #endif /* IXGBE_FCOE */
7765         tso = ixgbe_tso(tx_ring, first, &hdr_len);
7766         if (tso < 0)
7767                 goto out_drop;
7768         else if (!tso)
7769                 ixgbe_tx_csum(tx_ring, first);
7770
7771         /* add the ATR filter if ATR is on */
7772         if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
7773                 ixgbe_atr(tx_ring, first);
7774
7775 #ifdef IXGBE_FCOE
7776 xmit_fcoe:
7777 #endif /* IXGBE_FCOE */
7778         ixgbe_tx_map(tx_ring, first, hdr_len);
7779
7780         return NETDEV_TX_OK;
7781
7782 out_drop:
7783         dev_kfree_skb_any(first->skb);
7784         first->skb = NULL;
7785
7786         return NETDEV_TX_OK;
7787 }
7788
7789 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
7790                                       struct net_device *netdev,
7791                                       struct ixgbe_ring *ring)
7792 {
7793         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7794         struct ixgbe_ring *tx_ring;
7795
7796         /*
7797          * The minimum packet size for olinfo paylen is 17, so pad the skb
7798          * in order to meet this minimum size requirement.
7799          */
7800         if (skb_put_padto(skb, 17))
7801                 return NETDEV_TX_OK;
7802
7803         tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7804
7805         return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7806 }
7807
7808 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7809                                     struct net_device *netdev)
7810 {
7811         return __ixgbe_xmit_frame(skb, netdev, NULL);
7812 }
7813
7814 /**
7815  * ixgbe_set_mac - Change the Ethernet Address of the NIC
7816  * @netdev: network interface device structure
7817  * @p: pointer to an address structure
7818  *
7819  * Returns 0 on success, negative on failure
7820  **/
7821 static int ixgbe_set_mac(struct net_device *netdev, void *p)
7822 {
7823         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7824         struct ixgbe_hw *hw = &adapter->hw;
7825         struct sockaddr *addr = p;
7826
7827         if (!is_valid_ether_addr(addr->sa_data))
7828                 return -EADDRNOTAVAIL;
7829
7830         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7831         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7832
7833         ixgbe_mac_set_default_filter(adapter);
7834
7835         return 0;
7836 }
7837
7838 static int
7839 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7840 {
7841         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7842         struct ixgbe_hw *hw = &adapter->hw;
7843         u16 value;
7844         int rc;
7845
7846         if (prtad != hw->phy.mdio.prtad)
7847                 return -EINVAL;
7848         rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7849         if (!rc)
7850                 rc = value;
7851         return rc;
7852 }
7853
7854 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7855                             u16 addr, u16 value)
7856 {
7857         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7858         struct ixgbe_hw *hw = &adapter->hw;
7859
7860         if (prtad != hw->phy.mdio.prtad)
7861                 return -EINVAL;
7862         return hw->phy.ops.write_reg(hw, addr, devad, value);
7863 }
7864
7865 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7866 {
7867         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7868
7869         switch (cmd) {
7870         case SIOCSHWTSTAMP:
7871                 return ixgbe_ptp_set_ts_config(adapter, req);
7872         case SIOCGHWTSTAMP:
7873                 return ixgbe_ptp_get_ts_config(adapter, req);
7874         default:
7875                 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7876         }
7877 }
7878
7879 /**
7880  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
7881  * netdev->dev_addrs
7882  * @dev: network interface device structure
7883  *
7884  * Returns non-zero on failure
7885  **/
7886 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7887 {
7888         int err = 0;
7889         struct ixgbe_adapter *adapter = netdev_priv(dev);
7890         struct ixgbe_hw *hw = &adapter->hw;
7891
7892         if (is_valid_ether_addr(hw->mac.san_addr)) {
7893                 rtnl_lock();
7894                 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7895                 rtnl_unlock();
7896
7897                 /* update SAN MAC vmdq pool selection */
7898                 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7899         }
7900         return err;
7901 }
7902
7903 /**
7904  * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
7905  * netdev->dev_addrs
7906  * @dev: network interface device structure
7907  *
7908  * Returns non-zero on failure
7909  **/
7910 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7911 {
7912         int err = 0;
7913         struct ixgbe_adapter *adapter = netdev_priv(dev);
7914         struct ixgbe_mac_info *mac = &adapter->hw.mac;
7915
7916         if (is_valid_ether_addr(mac->san_addr)) {
7917                 rtnl_lock();
7918                 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7919                 rtnl_unlock();
7920         }
7921         return err;
7922 }
7923
7924 #ifdef CONFIG_NET_POLL_CONTROLLER
7925 /*
7926  * Polling 'interrupt' - used by things like netconsole to send skbs
7927  * without having to re-enable interrupts. It's not called while
7928  * the interrupt routine is executing.
7929  */
7930 static void ixgbe_netpoll(struct net_device *netdev)
7931 {
7932         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7933         int i;
7934
7935         /* if interface is down do nothing */
7936         if (test_bit(__IXGBE_DOWN, &adapter->state))
7937                 return;
7938
7939         /* loop through and schedule all active queues */
7940         for (i = 0; i < adapter->num_q_vectors; i++)
7941                 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
7942 }
7943
7944 #endif
7945 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7946                                                    struct rtnl_link_stats64 *stats)
7947 {
7948         struct ixgbe_adapter *adapter = netdev_priv(netdev);
7949         int i;
7950
7951         rcu_read_lock();
7952         for (i = 0; i < adapter->num_rx_queues; i++) {
7953                 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7954                 u64 bytes, packets;
7955                 unsigned int start;
7956
7957                 if (ring) {
7958                         do {
7959                                 start = u64_stats_fetch_begin_irq(&ring->syncp);
7960                                 packets = ring->stats.packets;
7961                                 bytes   = ring->stats.bytes;
7962                         } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7963                         stats->rx_packets += packets;
7964                         stats->rx_bytes   += bytes;
7965                 }
7966         }
7967
7968         for (i = 0; i < adapter->num_tx_queues; i++) {
7969                 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7970                 u64 bytes, packets;
7971                 unsigned int start;
7972
7973                 if (ring) {
7974                         do {
7975                                 start = u64_stats_fetch_begin_irq(&ring->syncp);
7976                                 packets = ring->stats.packets;
7977                                 bytes   = ring->stats.bytes;
7978                         } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7979                         stats->tx_packets += packets;
7980                         stats->tx_bytes   += bytes;
7981                 }
7982         }
7983         rcu_read_unlock();
7984         /* following stats updated by ixgbe_watchdog_task() */
7985         stats->multicast        = netdev->stats.multicast;
7986         stats->rx_errors        = netdev->stats.rx_errors;
7987         stats->rx_length_errors = netdev->stats.rx_length_errors;
7988         stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
7989         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
7990         return stats;
7991 }
7992
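/* The fetch_begin/retry pair in ixgbe_get_stats64() above is the
 * standard u64_stats seqcount pattern: on 32-bit SMP kernels a 64-bit
 * counter update is not atomic, so the loop re-reads packets/bytes
 * until it observes a consistent snapshot; on 64-bit builds the
 * helpers compile down to plain loads.
 */
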
7993 #ifdef CONFIG_IXGBE_DCB
7994 /**
7995  * ixgbe_validate_rtr - verify the 802.1Qp to Rx packet buffer mapping is valid.
7996  * @adapter: pointer to ixgbe_adapter
7997  * @tc: number of traffic classes currently enabled
7998  *
7999  * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
8000  * each 802.1Q priority maps to a packet buffer that exists.
8001  */
8002 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8003 {
8004         struct ixgbe_hw *hw = &adapter->hw;
8005         u32 reg, rsave;
8006         int i;
8007
8008         /* The 82598 has a static priority-to-TC mapping that cannot
8009          * be changed, so no validation is needed.
8010          */
8011         if (hw->mac.type == ixgbe_mac_82598EB)
8012                 return;
8013
8014         reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8015         rsave = reg;
8016
8017         for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8018                 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8019
8020                 /* If up2tc is out of bounds default to zero */
8021                 if (up2tc > tc)
8022                         reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8023         }
8024
8025         if (reg != rsave)
8026                 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8027
8028         return;
8029 }
8030
8031 /**
8032  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
8033  * @adapter: Pointer to adapter struct
8034  *
8035  * Populate the netdev user priority to tc map
8036  */
8037 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8038 {
8039         struct net_device *dev = adapter->netdev;
8040         struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8041         struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8042         u8 prio;
8043
8044         for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8045                 u8 tc = 0;
8046
8047                 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8048                         tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8049                 else if (ets)
8050                         tc = ets->prio_tc[prio];
8051
8052                 netdev_set_prio_tc_map(dev, prio, tc);
8053         }
8054 }
8055
8056 #endif /* CONFIG_IXGBE_DCB */
8057 /**
8058  * ixgbe_setup_tc - configure net_device for multiple traffic classes
8059  *
8060  * @dev: net device to configure
8061  * @tc: number of traffic classes to enable
8062  */
8063 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8064 {
8065         struct ixgbe_adapter *adapter = netdev_priv(dev);
8066         struct ixgbe_hw *hw = &adapter->hw;
8067         bool pools;
8068
8069         /* Hardware supports up to 8 traffic classes */
8070         if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8071                 return -EINVAL;
8072
8073         if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8074                 return -EINVAL;
8075
8076         pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
8077         if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
8078                 return -EBUSY;
8079
8080         /* Hardware has to reinitialize queues and interrupts to
8081          * match packet buffer alignment. Unfortunately, the
8082          * hardware is not flexible enough to do this dynamically.
8083          */
8084         if (netif_running(dev))
8085                 ixgbe_close(dev);
8086         ixgbe_clear_interrupt_scheme(adapter);
8087
8088 #ifdef CONFIG_IXGBE_DCB
8089         if (tc) {
8090                 netdev_set_num_tc(dev, tc);
8091                 ixgbe_set_prio_tc_map(adapter);
8092
8093                 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8094
8095                 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8096                         adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8097                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
8098                 }
8099         } else {
8100                 netdev_reset_tc(dev);
8101
8102                 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8103                         adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8104
8105                 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8106
8107                 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8108                 adapter->dcb_cfg.pfc_mode_enable = false;
8109         }
8110
8111         ixgbe_validate_rtr(adapter, tc);
8112
8113 #endif /* CONFIG_IXGBE_DCB */
8114         ixgbe_init_interrupt_scheme(adapter);
8115
8116         if (netif_running(dev))
8117                 return ixgbe_open(dev);
8118
8119         return 0;
8120 }
8121
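/* ixgbe_setup_tc() is normally reached from userspace through the
 * mqprio qdisc (illustrative command; the queue map arguments are
 * omitted):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 hw 1
 *
 * and is reused internally (e.g. by ixgbe_sriov_reinit() below and
 * ixgbe_fwd_add()) to force the full queue/interrupt reinit.
 */
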
8122 #ifdef CONFIG_PCI_IOV
8123 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
8124 {
8125         struct net_device *netdev = adapter->netdev;
8126
8127         rtnl_lock();
8128         ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
8129         rtnl_unlock();
8130 }
8131
8132 #endif
8133 void ixgbe_do_reset(struct net_device *netdev)
8134 {
8135         struct ixgbe_adapter *adapter = netdev_priv(netdev);
8136
8137         if (netif_running(netdev))
8138                 ixgbe_reinit_locked(adapter);
8139         else
8140                 ixgbe_reset(adapter);
8141 }
8142
8143 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
8144                                             netdev_features_t features)
8145 {
8146         struct ixgbe_adapter *adapter = netdev_priv(netdev);
8147
8148         /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
8149         if (!(features & NETIF_F_RXCSUM))
8150                 features &= ~NETIF_F_LRO;
8151
8152         /* Turn off LRO if not RSC capable */
8153         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
8154                 features &= ~NETIF_F_LRO;
8155
8156         return features;
8157 }
8158
8159 static int ixgbe_set_features(struct net_device *netdev,
8160                               netdev_features_t features)
8161 {
8162         struct ixgbe_adapter *adapter = netdev_priv(netdev);
8163         netdev_features_t changed = netdev->features ^ features;
8164         bool need_reset = false;
8165
8166         /* Make sure RSC matches LRO, reset if change */
8167         if (!(features & NETIF_F_LRO)) {
8168                 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8169                         need_reset = true;
8170                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
8171         } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
8172                    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
8173                 if (adapter->rx_itr_setting == 1 ||
8174                     adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
8175                         adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
8176                         need_reset = true;
8177                 } else if ((changed ^ features) & NETIF_F_LRO) {
8178                         e_info(probe, "rx-usecs set too low, "
8179                                "disabling RSC\n");
8180                 }
8181         }
8182
8183         /*
8184          * Check if Flow Director n-tuple support was enabled or disabled.  If
8185          * the state changed, we need to reset.
8186          */
8187         switch (features & NETIF_F_NTUPLE) {
8188         case NETIF_F_NTUPLE:
8189                 /* turn off ATR, enable perfect filters and reset */
8190                 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
8191                         need_reset = true;
8192
8193                 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
8194                 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8195                 break;
8196         default:
8197                 /* turn off perfect filters, enable ATR and reset */
8198                 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
8199                         need_reset = true;
8200
8201                 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8202
8203                 /* We cannot enable ATR if SR-IOV is enabled */
8204                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8205                         break;
8206
8207                 /* We cannot enable ATR if we have 2 or more traffic classes */
8208                 if (netdev_get_num_tc(netdev) > 1)
8209                         break;
8210
8211                 /* We cannot enable ATR if RSS is disabled */
8212                 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
8213                         break;
8214
8215                 /* A sample rate of 0 indicates ATR disabled */
8216                 if (!adapter->atr_sample_rate)
8217                         break;
8218
8219                 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
8220                 break;
8221         }
8222
8223         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8224                 ixgbe_vlan_strip_enable(adapter);
8225         else
8226                 ixgbe_vlan_strip_disable(adapter);
8227
8228         if (changed & NETIF_F_RXALL)
8229                 need_reset = true;
8230
8231         netdev->features = features;
8232
8233 #ifdef CONFIG_IXGBE_VXLAN
8234         if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
8235                 if (features & NETIF_F_RXCSUM)
8236                         adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8237                 else
8238                         ixgbe_clear_vxlan_port(adapter);
8239         }
8240 #endif /* CONFIG_IXGBE_VXLAN */
8241
8242         if (need_reset)
8243                 ixgbe_do_reset(netdev);
8244
8245         return 0;
8246 }
8247
8248 #ifdef CONFIG_IXGBE_VXLAN
8249 /**
8250  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
8251  * @dev: The port's netdev
8252  * @sa_family: Socket Family that VXLAN is notifying us about
8253  * @port: New UDP port number that VXLAN started listening to
8254  **/
8255 static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8256                                  __be16 port)
8257 {
8258         struct ixgbe_adapter *adapter = netdev_priv(dev);
8259         struct ixgbe_hw *hw = &adapter->hw;
8260         u16 new_port = ntohs(port);
8261
8262         if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8263                 return;
8264
8265         if (sa_family == AF_INET6)
8266                 return;
8267
8268         if (adapter->vxlan_port == new_port)
8269                 return;
8270
8271         if (adapter->vxlan_port) {
8272                 netdev_info(dev,
8273                             "Hit Max num of VXLAN ports, not adding port %d\n",
8274                             new_port);
8275                 return;
8276         }
8277
8278         adapter->vxlan_port = new_port;
8279         IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
8280 }
8281
8282 /**
8283  * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
8284  * @dev: The port's netdev
8285  * @sa_family: Socket Family that VXLAN is notifying us about
8286  * @port: UDP port number that VXLAN stopped listening to
8287  **/
8288 static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8289                                  __be16 port)
8290 {
8291         struct ixgbe_adapter *adapter = netdev_priv(dev);
8292         u16 new_port = ntohs(port);
8293
8294         if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8295                 return;
8296
8297         if (sa_family == AF_INET6)
8298                 return;
8299
8300         if (adapter->vxlan_port != new_port) {
8301                 netdev_info(dev, "Port %d was not found, not deleting\n",
8302                             new_port);
8303                 return;
8304         }
8305
8306         ixgbe_clear_vxlan_port(adapter);
8307         adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8308 }
8309 #endif /* CONFIG_IXGBE_VXLAN */
8310
8311 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8312                              struct net_device *dev,
8313                              const unsigned char *addr, u16 vid,
8314                              u16 flags)
8315 {
8316         /* guarantee we can provide a unique filter for the unicast address */
8317         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
8318                 struct ixgbe_adapter *adapter = netdev_priv(dev);
8319                 u16 pool = VMDQ_P(0);
8320
8321                 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
8322                         return -ENOMEM;
8323         }
8324
8325         return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
8326 }
8327
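/* A typical trigger for ixgbe_ndo_fdb_add() above is a userspace FDB
 * entry added via iproute2 (illustrative MAC address and ifname):
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0
 *
 * The RAR capacity check only gates unicast and link-local addresses;
 * multicast entries fall straight through to ndo_dflt_fdb_add().
 */
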
8328 /**
8329  * ixgbe_configure_bridge_mode - set various bridge modes
8330  * @adapter: the private structure
8331  * @mode: requested bridge mode
8332  *
8333  * Configure some settings required for various bridge modes.
8334  **/
8335 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
8336                                        __u16 mode)
8337 {
8338         struct ixgbe_hw *hw = &adapter->hw;
8339         unsigned int p, num_pools;
8340         u32 vmdctl;
8341
8342         switch (mode) {
8343         case BRIDGE_MODE_VEPA:
8344                 /* disable Tx loopback, rely on switch hairpin mode */
8345                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
8346
8347                 /* must enable Rx switching replication to allow multicast
8348                  * packet reception on all VFs, and to enable source address
8349                  * pruning.
8350                  */
8351                 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8352                 vmdctl |= IXGBE_VT_CTL_REPLEN;
8353                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8354
8355                 /* enable Rx source address pruning. Note, this requires
8356                  * replication to be enabled or else it does nothing.
8357                  */
8358                 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8359                 for (p = 0; p < num_pools; p++) {
8360                         if (hw->mac.ops.set_source_address_pruning)
8361                                 hw->mac.ops.set_source_address_pruning(hw,
8362                                                                        true,
8363                                                                        p);
8364                 }
8365                 break;
8366         case BRIDGE_MODE_VEB:
8367                 /* enable Tx loopback for internal VF/PF communication */
8368                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
8369                                 IXGBE_PFDTXGSWC_VT_LBEN);
8370
8371                 /* disable Rx switching replication unless we have SR-IOV
8372                  * virtual functions
8373                  */
8374                 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8375                 if (!adapter->num_vfs)
8376                         vmdctl &= ~IXGBE_VT_CTL_REPLEN;
8377                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8378
8379                 /* disable Rx source address pruning, since we don't expect to
8380                  * be receiving external loopback of our transmitted frames.
8381                  */
8382                 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8383                 for (p = 0; p < num_pools; p++) {
8384                         if (hw->mac.ops.set_source_address_pruning)
8385                                 hw->mac.ops.set_source_address_pruning(hw,
8386                                                                        false,
8387                                                                        p);
8388                 }
8389                 break;
8390         default:
8391                 return -EINVAL;
8392         }
8393
8394         adapter->bridge_mode = mode;
8395
8396         e_info(drv, "enabling bridge mode: %s\n",
8397                mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8398
8399         return 0;
8400 }
8401
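/* Both branches above are normally driven from userspace via iproute2
 * (illustrative PF name), arriving here through
 * ixgbe_ndo_bridge_setlink() below:
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 */
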
8402 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8403                                     struct nlmsghdr *nlh, u16 flags)
8404 {
8405         struct ixgbe_adapter *adapter = netdev_priv(dev);
8406         struct nlattr *attr, *br_spec;
8407         int rem;
8408
8409         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8410                 return -EOPNOTSUPP;
8411
8412         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8413         if (!br_spec)
8414                 return -EINVAL;
8415
8416         nla_for_each_nested(attr, br_spec, rem) {
8417                 int status;
8418                 __u16 mode;
8419
8420                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8421                         continue;
8422
8423                 if (nla_len(attr) < sizeof(mode))
8424                         return -EINVAL;
8425
8426                 mode = nla_get_u16(attr);
8427                 status = ixgbe_configure_bridge_mode(adapter, mode);
8428                 if (status)
8429                         return status;
8430
8431                 break;
8432         }
8433
8434         return 0;
8435 }
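
The setlink handler walks a nested netlink attribute: IFLA_AF_SPEC carries an IFLA_BRIDGE_MODE attribute whose payload is a u16 mode. A self-contained userspace model of that TLV walk, assuming the standard 4-byte-aligned netlink attribute layout and a little-endian host; the buffer contents are hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NLA_ALIGN(len) (((len) + 3) & ~3) /* attributes are 4-byte aligned */
#define IFLA_BRIDGE_MODE 1                /* from linux/if_bridge.h */

struct nla_hdr { uint16_t nla_len; uint16_t nla_type; };

/* Scan the attributes nested in buf, extracting the bridge mode if found */
static int find_bridge_mode(const uint8_t *buf, int rem, uint16_t *mode)
{
	while (rem >= (int)sizeof(struct nla_hdr)) {
		struct nla_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (hdr.nla_len < sizeof(hdr) || hdr.nla_len > rem)
			return -1; /* malformed attribute */
		if (hdr.nla_type == IFLA_BRIDGE_MODE &&
		    hdr.nla_len - sizeof(hdr) >= sizeof(*mode)) {
			memcpy(mode, buf + sizeof(hdr), sizeof(*mode));
			return 0;
		}
		buf += NLA_ALIGN(hdr.nla_len);
		rem -= NLA_ALIGN(hdr.nla_len);
	}
	return -1;
}

int main(void)
{
	/* one attribute: nla_len = 6, type IFLA_BRIDGE_MODE, u16 payload 1 */
	uint8_t buf[8] = { 6, 0, IFLA_BRIDGE_MODE, 0, 1, 0, 0, 0 };
	uint16_t mode;

	if (!find_bridge_mode(buf, sizeof(buf), &mode))
		printf("bridge mode %u\n", mode);
	return 0;
}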
8436
8437 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8438                                     struct net_device *dev,
8439                                     u32 filter_mask, int nlflags)
8440 {
8441         struct ixgbe_adapter *adapter = netdev_priv(dev);
8442
8443         if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8444                 return 0;
8445
8446         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8447                                        adapter->bridge_mode, 0, 0, nlflags,
8448                                        filter_mask, NULL);
8449 }
8450
8451 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
8452 {
8453         struct ixgbe_fwd_adapter *fwd_adapter = NULL;
8454         struct ixgbe_adapter *adapter = netdev_priv(pdev);
8455         int used_pools = adapter->num_vfs + adapter->num_rx_pools;
8456         unsigned int limit;
8457         int pool, err;
8458
8459         /* Hardware has a limited number of available pools. Each VF and the
8460          * PF require a pool. Check to ensure we don't attempt to use more
8461          * than the available number of pools.
8462          */
8463         if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
8464                 return ERR_PTR(-EINVAL);
8465
8466 #ifdef CONFIG_RPS
8467         if (vdev->num_rx_queues != vdev->num_tx_queues) {
8468                 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
8469                             vdev->name);
8470                 return ERR_PTR(-EINVAL);
8471         }
8472 #endif
8473         /* Check for hardware restriction on number of rx/tx queues */
8474         if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
8475             vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
8476                 netdev_info(pdev,
8477                             "%s: Supports RX/TX Queue counts 1, 2, and 4\n",
8478                             pdev->name);
8479                 return ERR_PTR(-EINVAL);
8480         }
8481
8482         if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8483               adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
8484             (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
8485                 return ERR_PTR(-EBUSY);
8486
8487         fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
8488         if (!fwd_adapter)
8489                 return ERR_PTR(-ENOMEM);
8490
8491         pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
8492         adapter->num_rx_pools++;
8493         set_bit(pool, &adapter->fwd_bitmask);
8494         limit = find_last_bit(&adapter->fwd_bitmask, 32);
8495
8496         /* Enable VMDq flag so device will be set in VM mode */
8497         adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
8498         adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8499         adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
8500
8501         /* Force reinit of ring allocation with VMDQ enabled */
8502         err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8503         if (err)
8504                 goto fwd_add_err;
8505         fwd_adapter->pool = pool;
8506         fwd_adapter->real_adapter = adapter;
8507         err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
8508         if (err)
8509                 goto fwd_add_err;
8510         netif_tx_start_all_queues(vdev);
8511         return fwd_adapter;
8512 fwd_add_err:
8513         /* unwind counter and free adapter struct */
8514         netdev_info(pdev,
8515                     "%s: dfwd hardware acceleration failed\n", vdev->name);
8516         clear_bit(pool, &adapter->fwd_bitmask);
8517         adapter->num_rx_pools--;
8518         kfree(fwd_adapter);
8519         return ERR_PTR(err);
8520 }
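
The pool bookkeeping in ixgbe_fwd_add()/ixgbe_fwd_del() is a small bitmap allocator: claim the first clear bit for the new station, and size the VMDq limit from the highest set bit. A self-contained sketch of that idiom, with plain loops standing in for the kernel's find_first_zero_bit() and find_last_bit(); the 32-pool ceiling mirrors the driver, the rest is illustrative:

#include <stdio.h>

#define MAX_POOLS 32

static unsigned long fwd_bitmask = 0x1; /* pool 0 belongs to the PF */

/* Claim the first free pool, as ixgbe_fwd_add() does */
static int alloc_pool(void)
{
	int pool;

	for (pool = 0; pool < MAX_POOLS; pool++) {
		if (!(fwd_bitmask & (1UL << pool))) {
			fwd_bitmask |= 1UL << pool;
			return pool;
		}
	}
	return -1; /* no pools left */
}

/* Highest in-use pool; the VMDq limit is this value plus one */
static int last_pool(void)
{
	int pool;

	for (pool = MAX_POOLS - 1; pool >= 0; pool--)
		if (fwd_bitmask & (1UL << pool))
			return pool;
	return -1;
}

int main(void)
{
	int a = alloc_pool();
	int b = alloc_pool();

	printf("pools %d,%d -> VMDq limit %d\n", a, b, last_pool() + 1);

	fwd_bitmask &= ~(1UL << a); /* the ixgbe_fwd_del() analogue */
	printf("after freeing %d: VMDq limit %d\n", a, last_pool() + 1);
	return 0;
}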
8521
8522 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
8523 {
8524         struct ixgbe_fwd_adapter *fwd_adapter = priv;
8525         struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
8526         unsigned int limit;
8527
8528         clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
8529         adapter->num_rx_pools--;
8530
8531         limit = find_last_bit(&adapter->fwd_bitmask, 32);
8532         adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8533         ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
8534         ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8535         netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
8536                    fwd_adapter->pool, adapter->num_rx_pools,
8537                    fwd_adapter->rx_base_queue,
8538                    fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
8539                    adapter->fwd_bitmask);
8540         kfree(fwd_adapter);
8541 }
8542
8543 #define IXGBE_MAX_TUNNEL_HDR_LEN 80
8544 static netdev_features_t
8545 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
8546                      netdev_features_t features)
8547 {
8548         if (!skb->encapsulation)
8549                 return features;
8550
8551         if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
8552                      IXGBE_MAX_TUNNEL_HDR_LEN))
8553                 return features & ~NETIF_F_ALL_CSUM;
8554
8555         return features;
8556 }
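
ixgbe_features_check() measures the distance from the outer transport header to the inner MAC header; when the encapsulation headers exceed IXGBE_MAX_TUNNEL_HDR_LEN the hardware cannot reach the inner checksum, so the offload flags are masked off and the stack falls back to software checksumming. A userspace sketch of that decision, with byte offsets as hypothetical stand-ins for the skb header pointers:

#include <stdio.h>
#include <stdbool.h>

#define MAX_TUNNEL_HDR_LEN 80 /* same limit as the driver */

/* Can checksum offload be kept for an encapsulated frame, given the
 * offsets of the outer L4 header and the inner Ethernet header? */
static bool keep_csum_offload(int transport_off, int inner_mac_off)
{
	return inner_mac_off - transport_off <= MAX_TUNNEL_HDR_LEN;
}

int main(void)
{
	/* VXLAN: outer UDP at offset 34, inner MAC at 34 + 8 + 8 = 50 */
	printf("VXLAN: %s\n",
	       keep_csum_offload(34, 50) ? "offload" : "software csum");
	/* an oversized stack of tunnel headers */
	printf("big:   %s\n",
	       keep_csum_offload(34, 140) ? "offload" : "software csum");
	return 0;
}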
8557
8558 static const struct net_device_ops ixgbe_netdev_ops = {
8559         .ndo_open               = ixgbe_open,
8560         .ndo_stop               = ixgbe_close,
8561         .ndo_start_xmit         = ixgbe_xmit_frame,
8562         .ndo_select_queue       = ixgbe_select_queue,
8563         .ndo_set_rx_mode        = ixgbe_set_rx_mode,
8564         .ndo_validate_addr      = eth_validate_addr,
8565         .ndo_set_mac_address    = ixgbe_set_mac,
8566         .ndo_change_mtu         = ixgbe_change_mtu,
8567         .ndo_tx_timeout         = ixgbe_tx_timeout,
8568         .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
8569         .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
8570         .ndo_do_ioctl           = ixgbe_ioctl,
8571         .ndo_set_vf_mac         = ixgbe_ndo_set_vf_mac,
8572         .ndo_set_vf_vlan        = ixgbe_ndo_set_vf_vlan,
8573         .ndo_set_vf_rate        = ixgbe_ndo_set_vf_bw,
8574         .ndo_set_vf_spoofchk    = ixgbe_ndo_set_vf_spoofchk,
8575         .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
8576         .ndo_set_vf_trust       = ixgbe_ndo_set_vf_trust,
8577         .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
8578         .ndo_get_stats64        = ixgbe_get_stats64,
8579 #ifdef CONFIG_IXGBE_DCB
8580         .ndo_setup_tc           = ixgbe_setup_tc,
8581 #endif
8582 #ifdef CONFIG_NET_POLL_CONTROLLER
8583         .ndo_poll_controller    = ixgbe_netpoll,
8584 #endif
8585 #ifdef CONFIG_NET_RX_BUSY_POLL
8586         .ndo_busy_poll          = ixgbe_low_latency_recv,
8587 #endif
8588 #ifdef IXGBE_FCOE
8589         .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
8590         .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
8591         .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8592         .ndo_fcoe_enable = ixgbe_fcoe_enable,
8593         .ndo_fcoe_disable = ixgbe_fcoe_disable,
8594         .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
8595         .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
8596 #endif /* IXGBE_FCOE */
8597         .ndo_set_features = ixgbe_set_features,
8598         .ndo_fix_features = ixgbe_fix_features,
8599         .ndo_fdb_add            = ixgbe_ndo_fdb_add,
8600         .ndo_bridge_setlink     = ixgbe_ndo_bridge_setlink,
8601         .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
8602         .ndo_dfwd_add_station   = ixgbe_fwd_add,
8603         .ndo_dfwd_del_station   = ixgbe_fwd_del,
8604 #ifdef CONFIG_IXGBE_VXLAN
8605         .ndo_add_vxlan_port     = ixgbe_add_vxlan_port,
8606         .ndo_del_vxlan_port     = ixgbe_del_vxlan_port,
8607 #endif /* CONFIG_IXGBE_VXLAN */
8608         .ndo_features_check     = ixgbe_features_check,
8609 };
8610
8611 /**
8612  * ixgbe_enumerate_functions - Get the number of ports this device has
8613  * @adapter: adapter structure
8614  *
8615  * This function enumerates the physical functions co-located on a single slot,
8616  * in order to determine how many ports a device has. This is most useful in
8617  * determining the required GT/s of PCIe bandwidth necessary for optimal
8618  * performance.
8619  **/
8620 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
8621 {
8622         struct pci_dev *entry, *pdev = adapter->pdev;
8623         int physfns = 0;
8624
8625         /* Some cards cannot use the generic method of counting PCIe
8626          * functions because they are behind a parent switch, so we
8627          * hardcode the correct number of functions for these.
8628          */
8629         if (ixgbe_pcie_from_parent(&adapter->hw))
8630                 physfns = 4;
8631
8632         list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
8633                 /* don't count virtual functions */
8634                 if (entry->is_virtfn)
8635                         continue;
8636
8637                 /* When the devices on the bus don't all match our device ID,
8638                  * we can't reliably determine the correct number of
8639                  * functions. This can occur if a function has been direct
8640                  * attached to a virtual machine using VT-d, for example. In
8641                  * this case, simply return -1 to indicate this.
8642                  */
8643                 if ((entry->vendor != pdev->vendor) ||
8644                     (entry->device != pdev->device))
8645                         return -1;
8646
8647                 physfns++;
8648         }
8649
8650         return physfns;
8651 }
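
The function count feeds the bandwidth check performed later in ixgbe_probe(): each 10GbE port is budgeted roughly 10 GT/s, 82598-class parts are clamped to 16 GT/s so they never warn about a limit they cannot meet, and a failed enumeration (-1) skips the check entirely. A small model of that arithmetic, simplified from the switch statement in probe:

#include <stdio.h>

/* Expected PCIe bandwidth in GT/s for a device with physfns 10GbE ports */
static int expected_gts(int physfns, int is_82598)
{
	int gts;

	if (physfns < 0)
		return -1; /* enumeration failed; caller skips the check */
	gts = physfns * 10;
	if (is_82598 && gts > 16)
		gts = 16; /* older parts can never reach more than this */
	return gts;
}

int main(void)
{
	printf("dual-port 82599: %d GT/s\n", expected_gts(2, 0));
	printf("quad-port 82598: %d GT/s\n", expected_gts(4, 1));
	printf("enumeration failed: %d\n", expected_gts(-1, 0));
	return 0;
}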
8652
8653 /**
8654  * ixgbe_wol_supported - Check whether device supports WoL
8655  * @adapter: pointer to the adapter struct
8656  * @device_id: the device ID
8657  * @subdevice_id: the subsystem device ID
8658  *
8659  * This function is used by probe and ethtool to determine
8660  * which devices have WoL support.
8661  *
8662  **/
8663 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8664                         u16 subdevice_id)
8665 {
8666         struct ixgbe_hw *hw = &adapter->hw;
8667         u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
8668         int is_wol_supported = 0;
8669
8670         switch (device_id) {
8671         case IXGBE_DEV_ID_82599_SFP:
8672                 /* Only these subdevices support WoL */
8673                 switch (subdevice_id) {
8674                 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
8675                 case IXGBE_SUBDEV_ID_82599_560FLR:
8676                         /* only support first port */
8677                         if (hw->bus.func != 0)
8678                                 break;
8679                 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
8680                 case IXGBE_SUBDEV_ID_82599_SFP:
8681                 case IXGBE_SUBDEV_ID_82599_RNDC:
8682                 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
8683                 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
8684                         is_wol_supported = 1;
8685                         break;
8686                 }
8687                 break;
8688         case IXGBE_DEV_ID_82599EN_SFP:
8689                 /* Only this subdevice supports WOL */
8690                 switch (subdevice_id) {
8691                 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
8692                         is_wol_supported = 1;
8693                         break;
8694                 }
8695                 break;
8696         case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
8697                 /* All subdevices except this one support WoL */
8698                 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
8699                         is_wol_supported = 1;
8700                 break;
8701         case IXGBE_DEV_ID_82599_KX4:
8702                 is_wol_supported = 1;
8703                 break;
8704         case IXGBE_DEV_ID_X540T:
8705         case IXGBE_DEV_ID_X540T1:
8706         case IXGBE_DEV_ID_X550T:
8707         case IXGBE_DEV_ID_X550EM_X_KX4:
8708         case IXGBE_DEV_ID_X550EM_X_KR:
8709         case IXGBE_DEV_ID_X550EM_X_10G_T:
8710                 /* check the EEPROM to see if WoL is enabled */
8711                 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
8712                     ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
8713                      (hw->bus.func == 0))) {
8714                         is_wol_supported = 1;
8715                 }
8716                 break;
8717         }
8718
8719         return is_wol_supported;
8720 }
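
For the X540/X550-class entries the decision comes from EEPROM word 0x2C: the capability field either grants WoL on both ports or on function 0 only. A compact model of that check; the mask values here are placeholders, not the real IXGBE_DEVICE_CAPS_WOL_* encoding:

#include <stdio.h>

#define WOL_PORT0_1 0x1 /* hypothetical: WoL on ports 0 and 1 */
#define WOL_PORT0   0x2 /* hypothetical: WoL on port 0 only */

/* Mirror the wol_cap/bus.func test in ixgbe_wol_supported() */
static int eeprom_wol_supported(unsigned int wol_cap, unsigned int bus_func)
{
	return wol_cap == WOL_PORT0_1 ||
	       (wol_cap == WOL_PORT0 && bus_func == 0);
}

int main(void)
{
	printf("func 1, both-port cap:  %d\n",
	       eeprom_wol_supported(WOL_PORT0_1, 1));
	printf("func 1, port0-only cap: %d\n",
	       eeprom_wol_supported(WOL_PORT0, 1));
	return 0;
}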
8721
8722 /**
8723  * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
8724  * @adapter: Pointer to adapter struct
8725  */
8726 static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
8727 {
8728 #ifdef CONFIG_OF
8729         struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
8730         struct ixgbe_hw *hw = &adapter->hw;
8731         const unsigned char *addr;
8732
8733         addr = of_get_mac_address(dp);
8734         if (addr) {
8735                 ether_addr_copy(hw->mac.perm_addr, addr);
8736                 return;
8737         }
8738 #endif /* CONFIG_OF */
8739
8740 #ifdef CONFIG_SPARC
8741         ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
8742 #endif /* CONFIG_SPARC */
8743 }
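
Whatever source supplies the permanent address, probe later rejects it unless is_valid_ether_addr() passes, i.e. the address is neither multicast (bit 0 of the first octet set) nor all zeros. A userspace equivalent of that validity test:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Equivalent of the kernel's is_valid_ether_addr(): reject multicast
 * (which also covers broadcast) and the all-zeros address. */
static bool valid_ether_addr(const uint8_t a[6])
{
	bool all_zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);

	return !(a[0] & 0x01) && !all_zero;
}

int main(void)
{
	uint8_t good[6]  = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };
	uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("unicast: %d, multicast: %d\n",
	       valid_ether_addr(good), valid_ether_addr(mcast));
	return 0;
}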
8744
8745 /**
8746  * ixgbe_probe - Device Initialization Routine
8747  * @pdev: PCI device information struct
8748  * @ent: entry in ixgbe_pci_tbl
8749  *
8750  * Returns 0 on success, negative on failure
8751  *
8752  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
8753  * The OS initialization, configuring of the adapter private structure,
8754  * and a hardware reset occur.
8755  **/
8756 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8757 {
8758         struct net_device *netdev;
8759         struct ixgbe_adapter *adapter = NULL;
8760         struct ixgbe_hw *hw;
8761         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
8762         int i, err, pci_using_dac, expected_gts;
8763         unsigned int indices = MAX_TX_QUEUES;
8764         u8 part_str[IXGBE_PBANUM_LENGTH];
8765         bool disable_dev = false;
8766 #ifdef IXGBE_FCOE
8767         u16 device_caps;
8768 #endif
8769         u32 eec;
8770
8771         /* Catch broken hardware that put the wrong VF device ID in
8772          * the PCIe SR-IOV capability.
8773          */
8774         if (pdev->is_virtfn) {
8775                 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
8776                      pci_name(pdev), pdev->vendor, pdev->device);
8777                 return -EINVAL;
8778         }
8779
8780         err = pci_enable_device_mem(pdev);
8781         if (err)
8782                 return err;
8783
8784         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
8785                 pci_using_dac = 1;
8786         } else {
8787                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8788                 if (err) {
8789                         dev_err(&pdev->dev,
8790                                 "No usable DMA configuration, aborting\n");
8791                         goto err_dma;
8792                 }
8793                 pci_using_dac = 0;
8794         }
8795
8796         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8797                                            IORESOURCE_MEM), ixgbe_driver_name);
8798         if (err) {
8799                 dev_err(&pdev->dev,
8800                         "pci_request_selected_regions failed 0x%x\n", err);
8801                 goto err_pci_reg;
8802         }
8803
8804         pci_enable_pcie_error_reporting(pdev);
8805
8806         pci_set_master(pdev);
8807         pci_save_state(pdev);
8808
8809         if (ii->mac == ixgbe_mac_82598EB) {
8810 #ifdef CONFIG_IXGBE_DCB
8811                 /* 8 TC w/ 4 queues per TC */
8812                 indices = 4 * MAX_TRAFFIC_CLASS;
8813 #else
8814                 indices = IXGBE_MAX_RSS_INDICES;
8815 #endif
8816         }
8817
8818         netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
8819         if (!netdev) {
8820                 err = -ENOMEM;
8821                 goto err_alloc_etherdev;
8822         }
8823
8824         SET_NETDEV_DEV(netdev, &pdev->dev);
8825
8826         adapter = netdev_priv(netdev);
8827
8828         adapter->netdev = netdev;
8829         adapter->pdev = pdev;
8830         hw = &adapter->hw;
8831         hw->back = adapter;
8832         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
8833
8834         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8835                               pci_resource_len(pdev, 0));
8836         adapter->io_addr = hw->hw_addr;
8837         if (!hw->hw_addr) {
8838                 err = -EIO;
8839                 goto err_ioremap;
8840         }
8841
8842         netdev->netdev_ops = &ixgbe_netdev_ops;
8843         ixgbe_set_ethtool_ops(netdev);
8844         netdev->watchdog_timeo = 5 * HZ;
8845         strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
8846
8847         /* Setup hw api */
8848         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
8849         hw->mac.type  = ii->mac;
8850         hw->mvals     = ii->mvals;
8851
8852         /* EEPROM */
8853         memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8854         eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
8855         if (ixgbe_removed(hw->hw_addr)) {
8856                 err = -EIO;
8857                 goto err_ioremap;
8858         }
8859         /* If EEPROM is valid (bit 8 = 1), use the default, otherwise use bit-bang */
8860         if (!(eec & (1 << 8)))
8861                 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
8862
8863         /* PHY */
8864         memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
8865         hw->phy.sfp_type = ixgbe_sfp_type_unknown;
8866         /* ixgbe_identify_phy_generic will set prtad and mmds properly */
8867         hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
8868         hw->phy.mdio.mmds = 0;
8869         hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8870         hw->phy.mdio.dev = netdev;
8871         hw->phy.mdio.mdio_read = ixgbe_mdio_read;
8872         hw->phy.mdio.mdio_write = ixgbe_mdio_write;
8873
8874         ii->get_invariants(hw);
8875
8876         /* setup the private structure */
8877         err = ixgbe_sw_init(adapter);
8878         if (err)
8879                 goto err_sw_init;
8880
8881         /* Make it possible for the adapter to be woken up via WoL */
8882         switch (adapter->hw.mac.type) {
8883         case ixgbe_mac_82599EB:
8884         case ixgbe_mac_X540:
8885         case ixgbe_mac_X550:
8886         case ixgbe_mac_X550EM_x:
8887                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8888                 break;
8889         default:
8890                 break;
8891         }
8892
8893         /*
8894          * If there is a fan on this device and it has failed, log the
8895          * failure.
8896          */
8897         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
8898                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
8899                 if (esdp & IXGBE_ESDP_SDP1)
8900                         e_crit(probe, "Fan has stopped, replace the adapter\n");
8901         }
8902
8903         if (allow_unsupported_sfp)
8904                 hw->allow_unsupported_sfp = allow_unsupported_sfp;
8905
8906         /* reset_hw fills in the perm_addr as well */
8907         hw->phy.reset_if_overtemp = true;
8908         err = hw->mac.ops.reset_hw(hw);
8909         hw->phy.reset_if_overtemp = false;
8910         if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
8911                 err = 0;
8912         } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
8913                 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
8914                 e_dev_err("Reload the driver after installing a supported module.\n");
8915                 goto err_sw_init;
8916         } else if (err) {
8917                 e_dev_err("HW Init failed: %d\n", err);
8918                 goto err_sw_init;
8919         }
8920
8921 #ifdef CONFIG_PCI_IOV
8922         /* SR-IOV not supported on the 82598 */
8923         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8924                 goto skip_sriov;
8925         /* Mailbox */
8926         ixgbe_init_mbx_params_pf(hw);
8927         memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
8928         pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
8929         ixgbe_enable_sriov(adapter);
8930 skip_sriov:
8931
8932 #endif
8933         netdev->features = NETIF_F_SG |
8934                            NETIF_F_IP_CSUM |
8935                            NETIF_F_IPV6_CSUM |
8936                            NETIF_F_HW_VLAN_CTAG_TX |
8937                            NETIF_F_HW_VLAN_CTAG_RX |
8938                            NETIF_F_TSO |
8939                            NETIF_F_TSO6 |
8940                            NETIF_F_RXHASH |
8941                            NETIF_F_RXCSUM;
8942
8943         netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
8944
8945         switch (adapter->hw.mac.type) {
8946         case ixgbe_mac_82599EB:
8947         case ixgbe_mac_X540:
8948         case ixgbe_mac_X550:
8949         case ixgbe_mac_X550EM_x:
8950                 netdev->features |= NETIF_F_SCTP_CSUM;
8951                 netdev->hw_features |= NETIF_F_SCTP_CSUM |
8952                                        NETIF_F_NTUPLE;
8953                 break;
8954         default:
8955                 break;
8956         }
8957
8958         netdev->hw_features |= NETIF_F_RXALL;
8959         netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
8960
8961         netdev->vlan_features |= NETIF_F_TSO;
8962         netdev->vlan_features |= NETIF_F_TSO6;
8963         netdev->vlan_features |= NETIF_F_IP_CSUM;
8964         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
8965         netdev->vlan_features |= NETIF_F_SG;
8966
8967         netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
8968                                    NETIF_F_IPV6_CSUM;
8969
8970         netdev->priv_flags |= IFF_UNICAST_FLT;
8971         netdev->priv_flags |= IFF_SUPP_NOFCS;
8972
8973 #ifdef CONFIG_IXGBE_VXLAN
8974         switch (adapter->hw.mac.type) {
8975         case ixgbe_mac_X550:
8976         case ixgbe_mac_X550EM_x:
8977                 netdev->hw_enc_features |= NETIF_F_RXCSUM |
8978                                            NETIF_F_IP_CSUM |
8979                                            NETIF_F_IPV6_CSUM;
8980                 break;
8981         default:
8982                 break;
8983         }
8984 #endif /* CONFIG_IXGBE_VXLAN */
8985
8986 #ifdef CONFIG_IXGBE_DCB
8987         netdev->dcbnl_ops = &dcbnl_ops;
8988 #endif
8989
8990 #ifdef IXGBE_FCOE
8991         if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
8992                 unsigned int fcoe_l;
8993
8994                 if (hw->mac.ops.get_device_caps) {
8995                         hw->mac.ops.get_device_caps(hw, &device_caps);
8996                         if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
8997                                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
8998                 }
8999
9000
9001                 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
9002                 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
9003
9004                 netdev->features |= NETIF_F_FSO |
9005                                     NETIF_F_FCOE_CRC;
9006
9007                 netdev->vlan_features |= NETIF_F_FSO |
9008                                          NETIF_F_FCOE_CRC |
9009                                          NETIF_F_FCOE_MTU;
9010         }
9011 #endif /* IXGBE_FCOE */
9012         if (pci_using_dac) {
9013                 netdev->features |= NETIF_F_HIGHDMA;
9014                 netdev->vlan_features |= NETIF_F_HIGHDMA;
9015         }
9016
9017         if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
9018                 netdev->hw_features |= NETIF_F_LRO;
9019         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9020                 netdev->features |= NETIF_F_LRO;
9021
9022         /* make sure the EEPROM is good */
9023         if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
9024                 e_dev_err("The EEPROM Checksum Is Not Valid\n");
9025                 err = -EIO;
9026                 goto err_sw_init;
9027         }
9028
9029         ixgbe_get_platform_mac_addr(adapter);
9030
9031         memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
9032
9033         if (!is_valid_ether_addr(netdev->dev_addr)) {
9034                 e_dev_err("invalid MAC address\n");
9035                 err = -EIO;
9036                 goto err_sw_init;
9037         }
9038
9039         ixgbe_mac_set_default_filter(adapter);
9040
9041         setup_timer(&adapter->service_timer, &ixgbe_service_timer,
9042                     (unsigned long) adapter);
9043
9044         if (ixgbe_removed(hw->hw_addr)) {
9045                 err = -EIO;
9046                 goto err_sw_init;
9047         }
9048         INIT_WORK(&adapter->service_task, ixgbe_service_task);
9049         set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
9050         clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
9051
9052         err = ixgbe_init_interrupt_scheme(adapter);
9053         if (err)
9054                 goto err_sw_init;
9055
9056         /* WOL not supported for all devices */
9057         /* WoL is not supported on all devices */
9058         hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
9059         hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
9060                                                 pdev->subsystem_device);
9061         if (hw->wol_enabled)
9062                 adapter->wol = IXGBE_WUFC_MAG;
9063
9064         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9065
9066         /* save off EEPROM version number */
9067         hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
9068         hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
9069
9070         /* pick up the PCI bus settings for reporting later */
9071         if (ixgbe_pcie_from_parent(hw))
9072                 ixgbe_get_parent_bus_info(adapter);
9073         else
9074                 hw->mac.ops.get_bus_info(hw);
9075
9076         /* calculate the expected PCIe bandwidth required for optimal
9077          * performance. Note that some older parts will never have enough
9078          * bandwidth due to being older generation PCIe parts. We clamp these
9079          * parts to ensure no warning is displayed if it can't be fixed.
9080          */
9081         switch (hw->mac.type) {
9082         case ixgbe_mac_82598EB:
9083                 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
9084                 break;
9085         default:
9086                 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
9087                 break;
9088         }
9089
9090         /* don't check link if we failed to enumerate functions */
9091         if (expected_gts > 0)
9092                 ixgbe_check_minimum_link(adapter, expected_gts);
9093
9094         err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
9095         if (err)
9096                 strlcpy(part_str, "Unknown", sizeof(part_str));
9097         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
9098                 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
9099                            hw->mac.type, hw->phy.type, hw->phy.sfp_type,
9100                            part_str);
9101         else
9102                 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
9103                            hw->mac.type, hw->phy.type, part_str);
9104
9105         e_dev_info("%pM\n", netdev->dev_addr);
9106
9107         /* reset the hardware with the new settings */
9108         err = hw->mac.ops.start_hw(hw);
9109         if (err == IXGBE_ERR_EEPROM_VERSION) {
9110                 /* We are running on a pre-production device, log a warning */
9111                 e_dev_warn("This device is a pre-production adapter/LOM. "
9112                            "Please be aware there may be issues associated "
9113                            "with your hardware.  If you are experiencing "
9114                            "problems please contact your Intel or hardware "
9115                            "representative who provided you with this "
9116                            "hardware.\n");
9117         }
9118         strcpy(netdev->name, "eth%d");
9119         err = register_netdev(netdev);
9120         if (err)
9121                 goto err_register;
9122
9123         pci_set_drvdata(pdev, adapter);
9124
9125         /* power down the optics for 82599 SFP+ fiber */
9126         if (hw->mac.ops.disable_tx_laser)
9127                 hw->mac.ops.disable_tx_laser(hw);
9128
9129         /* carrier off reporting is important to ethtool even BEFORE open */
9130         netif_carrier_off(netdev);
9131
9132 #ifdef CONFIG_IXGBE_DCA
9133         if (dca_add_requester(&pdev->dev) == 0) {
9134                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
9135                 ixgbe_setup_dca(adapter);
9136         }
9137 #endif
9138         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
9139                 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
9140                 for (i = 0; i < adapter->num_vfs; i++)
9141                         ixgbe_vf_configuration(pdev, (i | 0x10000000));
9142         }
9143
9144         /* firmware requires the driver version to be 0xFFFFFFFF
9145          * since the OS does not support this feature
9146          */
9147         if (hw->mac.ops.set_fw_drv_ver)
9148                 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
9149                                            0xFF);
9150
9151         /* add san mac addr to netdev */
9152         ixgbe_add_sanmac_netdev(netdev);
9153
9154         e_dev_info("%s\n", ixgbe_default_device_descr);
9155
9156 #ifdef CONFIG_IXGBE_HWMON
9157         if (ixgbe_sysfs_init(adapter))
9158                 e_err(probe, "failed to allocate sysfs resources\n");
9159 #endif /* CONFIG_IXGBE_HWMON */
9160
9161         ixgbe_dbg_adapter_init(adapter);
9162
9163         /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
9164         if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
9165                 hw->mac.ops.setup_link(hw,
9166                         IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
9167                         true);
9168
9169         return 0;
9170
9171 err_register:
9172         ixgbe_release_hw_control(adapter);
9173         ixgbe_clear_interrupt_scheme(adapter);
9174 err_sw_init:
9175         ixgbe_disable_sriov(adapter);
9176         adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
9177         iounmap(adapter->io_addr);
9178         kfree(adapter->mac_table);
9179 err_ioremap:
9180         disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9181         free_netdev(netdev);
9182 err_alloc_etherdev:
9183         pci_release_selected_regions(pdev,
9184                                      pci_select_bars(pdev, IORESOURCE_MEM));
9185 err_pci_reg:
9186 err_dma:
9187         if (!adapter || disable_dev)
9188                 pci_disable_device(pdev);
9189         return err;
9190 }
9191
9192 /**
9193  * ixgbe_remove - Device Removal Routine
9194  * @pdev: PCI device information struct
9195  *
9196  * ixgbe_remove is called by the PCI subsystem to alert the driver
9197  * that it should release a PCI device.  This could be caused by a
9198  * Hot-Plug event, or because the driver is going to be removed from
9199  * memory.
9200  **/
9201 static void ixgbe_remove(struct pci_dev *pdev)
9202 {
9203         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9204         struct net_device *netdev;
9205         bool disable_dev;
9206
9207         /* if !adapter then we already cleaned up in probe */
9208         if (!adapter)
9209                 return;
9210
9211         netdev  = adapter->netdev;
9212         ixgbe_dbg_adapter_exit(adapter);
9213
9214         set_bit(__IXGBE_REMOVING, &adapter->state);
9215         cancel_work_sync(&adapter->service_task);
9216
9217
9218 #ifdef CONFIG_IXGBE_DCA
9219         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
9220                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
9221                 dca_remove_requester(&pdev->dev);
9222                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
9223                                 IXGBE_DCA_CTRL_DCA_DISABLE);
9224         }
9225
9226 #endif
9227 #ifdef CONFIG_IXGBE_HWMON
9228         ixgbe_sysfs_exit(adapter);
9229 #endif /* CONFIG_IXGBE_HWMON */
9230
9231         /* remove the added san mac */
9232         ixgbe_del_sanmac_netdev(netdev);
9233
9234 #ifdef CONFIG_PCI_IOV
9235         ixgbe_disable_sriov(adapter);
9236 #endif
9237         if (netdev->reg_state == NETREG_REGISTERED)
9238                 unregister_netdev(netdev);
9239
9240         ixgbe_clear_interrupt_scheme(adapter);
9241
9242         ixgbe_release_hw_control(adapter);
9243
9244 #ifdef CONFIG_DCB
9245         kfree(adapter->ixgbe_ieee_pfc);
9246         kfree(adapter->ixgbe_ieee_ets);
9247
9248 #endif
9249         iounmap(adapter->io_addr);
9250         pci_release_selected_regions(pdev, pci_select_bars(pdev,
9251                                      IORESOURCE_MEM));
9252
9253         e_dev_info("complete\n");
9254
9255         kfree(adapter->mac_table);
9256         disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9257         free_netdev(netdev);
9258
9259         pci_disable_pcie_error_reporting(pdev);
9260
9261         if (disable_dev)
9262                 pci_disable_device(pdev);
9263 }
9264
9265 /**
9266  * ixgbe_io_error_detected - called when PCI error is detected
9267  * @pdev: Pointer to PCI device
9268  * @state: The current pci connection state
9269  *
9270  * This function is called after a PCI bus error affecting
9271  * this device has been detected.
9272  */
9273 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
9274                                                 pci_channel_state_t state)
9275 {
9276         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9277         struct net_device *netdev = adapter->netdev;
9278
9279 #ifdef CONFIG_PCI_IOV
9280         struct ixgbe_hw *hw = &adapter->hw;
9281         struct pci_dev *bdev, *vfdev;
9282         u32 dw0, dw1, dw2, dw3;
9283         int vf, pos;
9284         u16 req_id, pf_func;
9285
9286         if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
9287             adapter->num_vfs == 0)
9288                 goto skip_bad_vf_detection;
9289
9290         bdev = pdev->bus->self;
9291         while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
9292                 bdev = bdev->bus->self;
9293
9294         if (!bdev)
9295                 goto skip_bad_vf_detection;
9296
9297         pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
9298         if (!pos)
9299                 goto skip_bad_vf_detection;
9300
9301         dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
9302         dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
9303         dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
9304         dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
9305         if (ixgbe_removed(hw->hw_addr))
9306                 goto skip_bad_vf_detection;
9307
9308         req_id = dw1 >> 16;
9309         /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
9310         if (!(req_id & 0x0080))
9311                 goto skip_bad_vf_detection;
9312
9313         pf_func = req_id & 0x01;
9314         if ((pf_func & 1) == (pdev->devfn & 1)) {
9315                 unsigned int device_id;
9316
9317                 vf = (req_id & 0x7F) >> 1;
9318                 e_dev_err("VF %d has caused a PCIe error\n", vf);
9319                 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
9320                           "%8.8x\tdw3: %8.8x\n",
9321                           dw0, dw1, dw2, dw3);
9322                 switch (adapter->hw.mac.type) {
9323                 case ixgbe_mac_82599EB:
9324                         device_id = IXGBE_82599_VF_DEVICE_ID;
9325                         break;
9326                 case ixgbe_mac_X540:
9327                         device_id = IXGBE_X540_VF_DEVICE_ID;
9328                         break;
9329                 case ixgbe_mac_X550:
9330                         device_id = IXGBE_DEV_ID_X550_VF;
9331                         break;
9332                 case ixgbe_mac_X550EM_x:
9333                         device_id = IXGBE_DEV_ID_X550EM_X_VF;
9334                         break;
9335                 default:
9336                         device_id = 0;
9337                         break;
9338                 }
9339
9340                 /* Find the pci device of the offending VF */
9341                 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
9342                 while (vfdev) {
9343                         if (vfdev->devfn == (req_id & 0xFF))
9344                                 break;
9345                         vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
9346                                                device_id, vfdev);
9347                 }
9348                 /*
9349                  * There's a slim chance the VF could have been hot plugged,
9350                  * so if it is no longer present we don't need to issue the
9351                  * VFLR.  Just clean up the AER in that case.
9352                  */
9353                 if (vfdev) {
9354                         ixgbe_issue_vf_flr(adapter, vfdev);
9355                         /* Free device reference count */
9356                         pci_dev_put(vfdev);
9357                 }
9358
9359                 pci_cleanup_aer_uncorrect_error_status(pdev);
9360         }
9361
9362         /*
9363          * Even though the error may have occurred on the other port
9364          * we still need to increment the vf error reference count for
9365          * both ports because the I/O resume function will be called
9366          * for both of them.
9367          */
9368         adapter->vferr_refcount++;
9369
9370         return PCI_ERS_RESULT_RECOVERED;
9371
9372 skip_bad_vf_detection:
9373 #endif /* CONFIG_PCI_IOV */
9374         if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
9375                 return PCI_ERS_RESULT_DISCONNECT;
9376
9377         rtnl_lock();
9378         netif_device_detach(netdev);
9379
9380         if (state == pci_channel_io_perm_failure) {
9381                 rtnl_unlock();
9382                 return PCI_ERS_RESULT_DISCONNECT;
9383         }
9384
9385         if (netif_running(netdev))
9386                 ixgbe_down(adapter);
9387
9388         if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
9389                 pci_disable_device(pdev);
9390         rtnl_unlock();
9391
9392         /* Request a slot reset. */
9393         return PCI_ERS_RESULT_NEED_RESET;
9394 }
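
The bad-VF detection above recovers the faulting function from the AER TLP header log: the requestor ID occupies the upper half of the second dword, bit 7 marks a VF, bits 6:1 select the VF index, and bit 0 names the owning PF. A standalone model of that decoding:

#include <stdio.h>
#include <stdint.h>

/* Decode an 82599-style requestor ID as the error handler does */
static void decode_req_id(uint16_t req_id)
{
	if (!(req_id & 0x0080)) {
		printf("req_id 0x%04x: physical function\n", req_id);
		return;
	}
	printf("req_id 0x%04x: VF %u on PF %u\n",
	       req_id, (req_id & 0x7F) >> 1, req_id & 0x01);
}

int main(void)
{
	decode_req_id(0x0080); /* VF 0 on PF 0 */
	decode_req_id(0x0085); /* VF 2 on PF 1 */
	decode_req_id(0x0010); /* a PF */
	return 0;
}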
9395
9396 /**
9397  * ixgbe_io_slot_reset - called after the pci bus has been reset.
9398  * @pdev: Pointer to PCI device
9399  *
9400  * Restart the card from scratch, as if from a cold-boot.
9401  */
9402 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
9403 {
9404         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9405         pci_ers_result_t result;
9406         int err;
9407
9408         if (pci_enable_device_mem(pdev)) {
9409                 e_err(probe, "Cannot re-enable PCI device after reset.\n");
9410                 result = PCI_ERS_RESULT_DISCONNECT;
9411         } else {
9412                 smp_mb__before_atomic();
9413                 clear_bit(__IXGBE_DISABLED, &adapter->state);
9414                 adapter->hw.hw_addr = adapter->io_addr;
9415                 pci_set_master(pdev);
9416                 pci_restore_state(pdev);
9417                 pci_save_state(pdev);
9418
9419                 pci_wake_from_d3(pdev, false);
9420
9421                 ixgbe_reset(adapter);
9422                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9423                 result = PCI_ERS_RESULT_RECOVERED;
9424         }
9425
9426         err = pci_cleanup_aer_uncorrect_error_status(pdev);
9427         if (err) {
9428                 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
9429                           "failed 0x%0x\n", err);
9430                 /* non-fatal, continue */
9431         }
9432
9433         return result;
9434 }
9435
9436 /**
9437  * ixgbe_io_resume - called when traffic can start flowing again.
9438  * @pdev: Pointer to PCI device
9439  *
9440  * This callback is called when the error recovery driver tells us that
9441  * it's OK to resume normal operation.
9442  */
9443 static void ixgbe_io_resume(struct pci_dev *pdev)
9444 {
9445         struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9446         struct net_device *netdev = adapter->netdev;
9447
9448 #ifdef CONFIG_PCI_IOV
9449         if (adapter->vferr_refcount) {
9450                 e_info(drv, "Resuming after VF err\n");
9451                 adapter->vferr_refcount--;
9452                 return;
9453         }
9454
9455 #endif
9456         if (netif_running(netdev))
9457                 ixgbe_up(adapter);
9458
9459         netif_device_attach(netdev);
9460 }
9461
9462 static const struct pci_error_handlers ixgbe_err_handler = {
9463         .error_detected = ixgbe_io_error_detected,
9464         .slot_reset = ixgbe_io_slot_reset,
9465         .resume = ixgbe_io_resume,
9466 };
9467
9468 static struct pci_driver ixgbe_driver = {
9469         .name     = ixgbe_driver_name,
9470         .id_table = ixgbe_pci_tbl,
9471         .probe    = ixgbe_probe,
9472         .remove   = ixgbe_remove,
9473 #ifdef CONFIG_PM
9474         .suspend  = ixgbe_suspend,
9475         .resume   = ixgbe_resume,
9476 #endif
9477         .shutdown = ixgbe_shutdown,
9478         .sriov_configure = ixgbe_pci_sriov_configure,
9479         .err_handler = &ixgbe_err_handler
9480 };
9481
9482 /**
9483  * ixgbe_init_module - Driver Registration Routine
9484  *
9485  * ixgbe_init_module is the first routine called when the driver is
9486  * loaded. All it does is register with the PCI subsystem.
9487  **/
9488 static int __init ixgbe_init_module(void)
9489 {
9490         int ret;
9491         pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
9492         pr_info("%s\n", ixgbe_copyright);
9493
9494         ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
9495         if (!ixgbe_wq) {
9496                 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
9497                 return -ENOMEM;
9498         }
9499
9500         ixgbe_dbg_init();
9501
9502         ret = pci_register_driver(&ixgbe_driver);
9503         if (ret) {
9504                 ixgbe_dbg_exit();
9505                 return ret;
9506         }
9507
9508 #ifdef CONFIG_IXGBE_DCA
9509         dca_register_notify(&dca_notifier);
9510 #endif
9511
9512         return 0;
9513 }
9514
9515 module_init(ixgbe_init_module);
9516
9517 /**
9518  * ixgbe_exit_module - Driver Exit Cleanup Routine
9519  *
9520  * ixgbe_exit_module is called just before the driver is removed
9521  * from memory.
9522  **/
9523 static void __exit ixgbe_exit_module(void)
9524 {
9525 #ifdef CONFIG_IXGBE_DCA
9526         dca_unregister_notify(&dca_notifier);
9527 #endif
9528         pci_unregister_driver(&ixgbe_driver);
9529
9530         ixgbe_dbg_exit();
9531         if (ixgbe_wq) {
9532                 destroy_workqueue(ixgbe_wq);
9533                 ixgbe_wq = NULL;
9534         }
9535 }
9536
9537 #ifdef CONFIG_IXGBE_DCA
9538 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
9539                             void *p)
9540 {
9541         int ret_val;
9542
9543         ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
9544                                          __ixgbe_notify_dca);
9545
9546         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
9547 }
9548
9549 #endif /* CONFIG_IXGBE_DCA */
9550
9551 module_exit(ixgbe_exit_module);
9552
9553 /* ixgbe_main.c */