/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"
#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};
/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}
/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}
	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}
	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;
	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;

		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}
	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   |  IP    |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |              RSS Hash                |               |
		 * 0 +-------------------+ Rsvd |  Reserved    |  MRQ RSS   |
		 *   |  Packet   | IP    |      |              |   Type     |
		 *   | Checksum  | Ident |      |              |            |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}

exit:
	return;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
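
/* Worked example (illustrative): with count = 256, next_to_clean = 10 and
 * next_to_use = 250, the ring has wrapped, so the unused count is
 * 256 + 10 - 250 - 1 = 15.  The "- 1" keeps one slot permanently empty so
 * that a full ring (next_to_use just behind next_to_clean) can never be
 * confused with an empty one (next_to_use == next_to_clean).
 */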
/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value. This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers. The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * saved for this packet.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}
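
/* Note on read ordering above: RXSTMPL is read first and RXSTMPH last;
 * given the latching behaviour described in the comment, the hardware can
 * only time stamp the next packet once both halves of the 64-bit value
 * have been consumed.
 */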
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
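
/* Layout assumed by the casts above: the descriptor's 32-bit status_error
 * word carries the status bits in its low 16 bits and the error bits in
 * bits 31:24, so "status" and "errors" are simply the two ends of the same
 * word.  E.g. a write-back with a TCPCS status bit set and no error bits
 * set falls through to CHECKSUM_UNNECESSARY.
 */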
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);

		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
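
/* Both tail-write helpers above guard against the PCIm2PCI arbiter problem
 * (FLAG2_PCIM2PCI_ARBITER_WA): when the Manageability Engine owns the
 * arbiter, a tail write can be dropped or corrupted.  The value is read
 * back and, on mismatch, the ring is disabled and a full reset scheduled
 * rather than letting hardware DMA from a bogus tail.
 */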
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
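
/* The tail bump above is deliberately batched: the tail register is only
 * written when i crosses a multiple of E1000_RX_BUFFER_WRITE (16 in this
 * driver), since an MMIO write per descriptor would dominate the cost of
 * refilling the ring.
 */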
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
						  gfp);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
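
/* A note on the "i << 1" tail writes above: packet-split descriptors are
 * twice the size of the legacy/extended ones, while the hardware tail
 * appears to count in single (16-byte) descriptor units, so each ring
 * index corresponds to two tail units.
 */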
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16;	/* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}
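
/* The RSS value passed in here comes straight from the descriptor
 * write-back (rx_desc->wb.lower.hi_dword.rss at the call sites); storing
 * it in skb->rxhash lets the stack reuse the hardware hash for flow
 * steering instead of recomputing one in software.
 */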
/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
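		/* With the driver default of copybreak = 256, only sub-256
		 * byte frames take the path above; copying into a right-sized
		 * skb lets the full-sized receive buffer be recycled
		 * immediately instead of travelling up the stack.
		 */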
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be block on write-back, flush and detect again
		 * flush pending descriptor writebacks to memory
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/* Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();

		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, MII_BMSR, &phy_status);
	e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
	e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
	      phy_status, phy_1000t_status, phy_ext_status, pci_status);

	/* Suggest workaround for known h/w issue */
	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
		e_err("Try turning off Tx pause (flow control) via ethtool\n");
}
/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 */
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
						     tx_hwtstamp_work);
	struct e1000_hw *hw = &adapter->hw;

	if (!adapter->tx_hwtstamp_skb)
		return;

	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
		struct skb_shared_hwtstamps shhwtstamps;
		u64 txstmp;

		txstmp = er32(TXSTMPL);
		txstmp |= (u64)er32(TXSTMPH) << 32;

		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

		skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
	} else {
		/* reschedule to check later */
		schedule_work(&adapter->tx_hwtstamp_work);
	}
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;

		rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
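
/* Note on the return value: "count < tx_ring->count" is false only when the
 * loop above consumed a full ring's worth of descriptors in one call, which
 * the NAPI caller can treat as "more Tx work pending" and use to keep
 * polling instead of re-enabling interrupts.
 */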
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and effects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct skb_shared_info *shinfo;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}
#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;

					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE, DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				continue;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		e1000e_update_rdt_wa(rx_ring, 0);
	else
		writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);

			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (rx_ring->set_itr) {
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);

		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
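
/* The ITR/EITR writes above convert itr_val (interrupts/sec) into the
 * register's 256 ns interval units: interval = 10^9 / (itr_val * 256).
 * Worked example: itr_val = 20000 ints/s gives
 * 1000000000 / (20000 * 256) = 195, i.e. roughly a 50 us minimum gap
 * between interrupts; a raw value of 1 effectively disables throttling.
 */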
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				struct e1000_adapter *a = adapter;

				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix_range(a->pdev,
							    a->msix_entries,
							    a->num_vectors,
							    a->num_vectors);
				if (err > 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
2077 * e1000_request_msix - Initialize MSI-X interrupts
2079 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2082 static int e1000_request_msix(struct e1000_adapter *adapter)
2084 struct net_device *netdev = adapter->netdev;
2085 int err = 0, vector = 0;
2087 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2088 snprintf(adapter->rx_ring->name,
2089 sizeof(adapter->rx_ring->name) - 1,
2090 "%s-rx-0", netdev->name);
2092 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2093 err = request_irq(adapter->msix_entries[vector].vector,
2094 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2098 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2099 E1000_EITR_82574(vector);
2100 adapter->rx_ring->itr_val = adapter->itr;
2103 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2104 snprintf(adapter->tx_ring->name,
2105 sizeof(adapter->tx_ring->name) - 1,
2106 "%s-tx-0", netdev->name);
2107 else
2108 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2109 err = request_irq(adapter->msix_entries[vector].vector,
2110 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2114 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2115 E1000_EITR_82574(vector);
2116 adapter->tx_ring->itr_val = adapter->itr;
2119 err = request_irq(adapter->msix_entries[vector].vector,
2120 e1000_msix_other, 0, netdev->name, netdev);
2124 e1000_configure_msix(adapter);
2130 * e1000_request_irq - initialize interrupts
2132 * Attempts to configure interrupts using the best available
2133 * capabilities of the hardware and kernel.
2135 static int e1000_request_irq(struct e1000_adapter *adapter)
2137 struct net_device *netdev = adapter->netdev;
2140 if (adapter->msix_entries) {
2141 err = e1000_request_msix(adapter);
2144 /* fall back to MSI */
2145 e1000e_reset_interrupt_capability(adapter);
2146 adapter->int_mode = E1000E_INT_MODE_MSI;
2147 e1000e_set_interrupt_capability(adapter);
2149 if (adapter->flags & FLAG_MSI_ENABLED) {
2150 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2151 netdev->name, netdev);
2155 /* fall back to legacy interrupt */
2156 e1000e_reset_interrupt_capability(adapter);
2157 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2160 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2161 netdev->name, netdev);
2163 e_err("Unable to allocate interrupt, Error: %d\n", err);
2168 static void e1000_free_irq(struct e1000_adapter *adapter)
2170 struct net_device *netdev = adapter->netdev;
2172 if (adapter->msix_entries) {
2175 free_irq(adapter->msix_entries[vector].vector, netdev);
2176 vector++;
2178 free_irq(adapter->msix_entries[vector].vector, netdev);
2179 vector++;
2181 /* Other Causes interrupt vector */
2182 free_irq(adapter->msix_entries[vector].vector, netdev);
2186 free_irq(adapter->pdev->irq, netdev);
2190 * e1000_irq_disable - Mask off interrupt generation on the NIC
2192 static void e1000_irq_disable(struct e1000_adapter *adapter)
2194 struct e1000_hw *hw = &adapter->hw;
2197 if (adapter->msix_entries)
2198 ew32(EIAC_82574, 0);
2201 if (adapter->msix_entries) {
2203 for (i = 0; i < adapter->num_vectors; i++)
2204 synchronize_irq(adapter->msix_entries[i].vector);
2206 synchronize_irq(adapter->pdev->irq);
2211 * e1000_irq_enable - Enable default interrupt generation settings
2213 static void e1000_irq_enable(struct e1000_adapter *adapter)
2215 struct e1000_hw *hw = &adapter->hw;
2217 if (adapter->msix_entries) {
2218 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2219 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2220 } else if (hw->mac.type == e1000_pch_lpt) {
2221 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2223 ew32(IMS, IMS_ENABLE_MASK);
2229 * e1000e_get_hw_control - get control of the h/w from f/w
2230 * @adapter: address of board private structure
2232 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2233 * For ASF and Pass Through versions of f/w this means that
2234 * the driver is loaded. For AMT version (only with 82573)
2235 * of the f/w this means that the network i/f is open.
2237 void e1000e_get_hw_control(struct e1000_adapter *adapter)
2239 struct e1000_hw *hw = &adapter->hw;
2243 /* Let firmware know the driver has taken over */
2244 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2246 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2247 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2248 ctrl_ext = er32(CTRL_EXT);
2249 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2254 * e1000e_release_hw_control - release control of the h/w to f/w
2255 * @adapter: address of board private structure
2257 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2258 * For ASF and Pass Through versions of f/w this means that the
2259 driver is no longer loaded. For AMT version (only with 82573)
2260 * of the f/w this means that the network i/f is closed.
2263 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2265 struct e1000_hw *hw = &adapter->hw;
2269 /* Let firmware take over control of h/w */
2270 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2272 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2273 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2274 ctrl_ext = er32(CTRL_EXT);
2275 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2280 * e1000_alloc_ring_dma - allocate memory for a ring structure
2282 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2283 struct e1000_ring *ring)
2285 struct pci_dev *pdev = adapter->pdev;
2287 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2296 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2297 * @tx_ring: Tx descriptor ring
2299 * Return 0 on success, negative on failure
2301 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2303 struct e1000_adapter *adapter = tx_ring->adapter;
2304 int err = -ENOMEM, size;
2306 size = sizeof(struct e1000_buffer) * tx_ring->count;
2307 tx_ring->buffer_info = vzalloc(size);
2308 if (!tx_ring->buffer_info)
2311 /* round up to nearest 4K */
2312 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2313 tx_ring->size = ALIGN(tx_ring->size, 4096);
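/* e.g. the default E1000_DEFAULT_TXD of 256 descriptors times the
 * 16-byte legacy Tx descriptor is exactly 4096 bytes, so the ALIGN
 * above is a no-op in the default configuration.
 */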
2315 err = e1000_alloc_ring_dma(adapter, tx_ring);
2319 tx_ring->next_to_use = 0;
2320 tx_ring->next_to_clean = 0;
2324 vfree(tx_ring->buffer_info);
2325 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2330 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2331 * @rx_ring: Rx descriptor ring
2333 * Returns 0 on success, negative on failure
2335 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2337 struct e1000_adapter *adapter = rx_ring->adapter;
2338 struct e1000_buffer *buffer_info;
2339 int i, size, desc_len, err = -ENOMEM;
2341 size = sizeof(struct e1000_buffer) * rx_ring->count;
2342 rx_ring->buffer_info = vzalloc(size);
2343 if (!rx_ring->buffer_info)
2346 for (i = 0; i < rx_ring->count; i++) {
2347 buffer_info = &rx_ring->buffer_info[i];
2348 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2349 sizeof(struct e1000_ps_page),
2351 if (!buffer_info->ps_pages)
2355 desc_len = sizeof(union e1000_rx_desc_packet_split);
2357 /* Round up to nearest 4K */
2358 rx_ring->size = rx_ring->count * desc_len;
2359 rx_ring->size = ALIGN(rx_ring->size, 4096);
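/* e.g. 256 descriptors times the 32-byte packet-split descriptor is
 * 8192 bytes, already a whole number of 4 KB pages.
 */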
2361 err = e1000_alloc_ring_dma(adapter, rx_ring);
2365 rx_ring->next_to_clean = 0;
2366 rx_ring->next_to_use = 0;
2367 rx_ring->rx_skb_top = NULL;
2372 for (i = 0; i < rx_ring->count; i++) {
2373 buffer_info = &rx_ring->buffer_info[i];
2374 kfree(buffer_info->ps_pages);
2377 vfree(rx_ring->buffer_info);
2378 e_err("Unable to allocate memory for the receive descriptor ring\n");
2383 * e1000_clean_tx_ring - Free Tx Buffers
2384 * @tx_ring: Tx descriptor ring
2386 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2388 struct e1000_adapter *adapter = tx_ring->adapter;
2389 struct e1000_buffer *buffer_info;
2393 for (i = 0; i < tx_ring->count; i++) {
2394 buffer_info = &tx_ring->buffer_info[i];
2395 e1000_put_txbuf(tx_ring, buffer_info);
2398 netdev_reset_queue(adapter->netdev);
2399 size = sizeof(struct e1000_buffer) * tx_ring->count;
2400 memset(tx_ring->buffer_info, 0, size);
2402 memset(tx_ring->desc, 0, tx_ring->size);
2404 tx_ring->next_to_use = 0;
2405 tx_ring->next_to_clean = 0;
2407 writel(0, tx_ring->head);
2408 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2409 e1000e_update_tdt_wa(tx_ring, 0);
2410 else
2411 writel(0, tx_ring->tail);
2415 * e1000e_free_tx_resources - Free Tx Resources per Queue
2416 * @tx_ring: Tx descriptor ring
2418 * Free all transmit software resources
2420 void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2422 struct e1000_adapter *adapter = tx_ring->adapter;
2423 struct pci_dev *pdev = adapter->pdev;
2425 e1000_clean_tx_ring(tx_ring);
2427 vfree(tx_ring->buffer_info);
2428 tx_ring->buffer_info = NULL;
2430 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2432 tx_ring->desc = NULL;
2436 * e1000e_free_rx_resources - Free Rx Resources
2437 * @rx_ring: Rx descriptor ring
2439 * Free all receive software resources
2441 void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2443 struct e1000_adapter *adapter = rx_ring->adapter;
2444 struct pci_dev *pdev = adapter->pdev;
2447 e1000_clean_rx_ring(rx_ring);
2449 for (i = 0; i < rx_ring->count; i++)
2450 kfree(rx_ring->buffer_info[i].ps_pages);
2452 vfree(rx_ring->buffer_info);
2453 rx_ring->buffer_info = NULL;
2455 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2457 rx_ring->desc = NULL;
2461 * e1000_update_itr - update the dynamic ITR value based on statistics
2463 * @itr_setting: current adapter->itr
2464 * @packets: the number of packets during this measurement interval
2465 * @bytes: the number of bytes during this measurement interval
2467 * Stores a new ITR value based on packets and byte
2468 * counts during the last interrupt. The advantage of per interrupt
2469 * computation is faster updates and more accurate ITR for the current
2470 * traffic pattern. Constants in this function were computed
2471 * based on theoretical maximum wire speed and thresholds were set based
2472 * on testing data as well as attempting to minimize response time
2473 * while increasing bulk throughput. This functionality is controlled
2474 * by the InterruptThrottleRate module parameter.
2476 static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2478 unsigned int retval = itr_setting;
2480 if (packets == 0)
2481 return itr_setting;
2483 switch (itr_setting) {
2484 case lowest_latency:
2485 /* handle TSO and jumbo frames */
2486 if (bytes / packets > 8000)
2487 retval = bulk_latency;
2488 else if ((packets < 5) && (bytes > 512))
2489 retval = low_latency;
2491 case low_latency: /* 50 usec aka 20000 ints/s */
2492 if (bytes > 10000) {
2493 /* this if handles the TSO accounting */
2494 if (bytes / packets > 8000)
2495 retval = bulk_latency;
2496 else if ((packets < 10) || ((bytes / packets) > 1200))
2497 retval = bulk_latency;
2498 else if (packets > 35)
2499 retval = lowest_latency;
2500 } else if (bytes / packets > 2000) {
2501 retval = bulk_latency;
2502 } else if (packets <= 2 && bytes < 512) {
2503 retval = lowest_latency;
2506 case bulk_latency: /* 250 usec aka 4000 ints/s */
2507 if (bytes > 25000) {
2509 retval = low_latency;
2510 } else if (bytes < 6000) {
2511 retval = low_latency;
2519 static void e1000_set_itr(struct e1000_adapter *adapter)
2522 u32 new_itr = adapter->itr;
2524 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2525 if (adapter->link_speed != SPEED_1000) {
2526 current_itr = 0;
2527 new_itr = 4000;
2528 goto set_itr_now;
2529 }
2531 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2532 new_itr = 0;
2533 goto set_itr_now;
2534 }
2536 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2537 adapter->total_tx_packets,
2538 adapter->total_tx_bytes);
2539 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2540 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2541 adapter->tx_itr = low_latency;
2543 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2544 adapter->total_rx_packets,
2545 adapter->total_rx_bytes);
2546 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2547 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2548 adapter->rx_itr = low_latency;
2550 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2552 /* counts and packets in update_itr are dependent on these numbers */
2553 switch (current_itr) {
2554 case lowest_latency:
2555 new_itr = 70000;
2556 break;
2557 case low_latency:
2558 new_itr = 20000; /* aka hwitr = ~200 */
2567 set_itr_now:
2568 if (new_itr != adapter->itr) {
2569 /* this attempts to bias the interrupt rate towards Bulk
2570 * by adding intermediate steps when interrupt rate is
2571 * increasing
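/* Worked example: ramping adapter->itr from 4000 to 20000 ints/s
 * steps through 9000, 14000, 19000 and then 20000, since each pass
 * adds at most a quarter of the target rate (new_itr >> 2).
 */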
2573 new_itr = new_itr > adapter->itr ?
2574 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2575 adapter->itr = new_itr;
2576 adapter->rx_ring->itr_val = new_itr;
2577 if (adapter->msix_entries)
2578 adapter->rx_ring->set_itr = 1;
2580 e1000e_write_itr(adapter, new_itr);
2585 * e1000e_write_itr - write the ITR value to the appropriate registers
2586 * @adapter: address of board private structure
2587 * @itr: new ITR value to program
2589 * e1000e_write_itr determines if the adapter is in MSI-X mode
2590 * and, if so, writes the EITR registers with the ITR value.
2591 * Otherwise, it writes the ITR value into the ITR register.
2593 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2595 struct e1000_hw *hw = &adapter->hw;
2596 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2598 if (adapter->msix_entries) {
2601 for (vector = 0; vector < adapter->num_vectors; vector++)
2602 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2609 * e1000_alloc_queues - Allocate memory for all rings
2610 * @adapter: board private structure to initialize
2612 static int e1000_alloc_queues(struct e1000_adapter *adapter)
2614 int size = sizeof(struct e1000_ring);
2616 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2617 if (!adapter->tx_ring)
2619 adapter->tx_ring->count = adapter->tx_ring_count;
2620 adapter->tx_ring->adapter = adapter;
2622 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2623 if (!adapter->rx_ring)
2625 adapter->rx_ring->count = adapter->rx_ring_count;
2626 adapter->rx_ring->adapter = adapter;
2630 e_err("Unable to allocate memory for queues\n");
2631 kfree(adapter->rx_ring);
2632 kfree(adapter->tx_ring);
2637 * e1000e_poll - NAPI Rx polling callback
2638 * @napi: struct associated with this polling callback
2639 * @weight: number of packets driver is allowed to process this poll
2641 static int e1000e_poll(struct napi_struct *napi, int weight)
2643 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2645 struct e1000_hw *hw = &adapter->hw;
2646 struct net_device *poll_dev = adapter->netdev;
2647 int tx_cleaned = 1, work_done = 0;
2649 adapter = netdev_priv(poll_dev);
2651 if (!adapter->msix_entries ||
2652 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2653 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2655 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2660 /* If weight not fully consumed, exit the polling mode */
2661 if (work_done < weight) {
2662 if (adapter->itr_setting & 3)
2663 e1000_set_itr(adapter);
2664 napi_complete(napi);
2665 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2666 if (adapter->msix_entries)
2667 ew32(IMS, adapter->rx_ring->ims_val);
2669 e1000_irq_enable(adapter);
2676 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2677 __always_unused __be16 proto, u16 vid)
2679 struct e1000_adapter *adapter = netdev_priv(netdev);
2680 struct e1000_hw *hw = &adapter->hw;
2683 /* don't update vlan cookie if already programmed */
2684 if ((adapter->hw.mng_cookie.status &
2685 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2686 (vid == adapter->mng_vlan_id))
2689 /* add VID to filter table */
2690 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2691 index = (vid >> 5) & 0x7F;
2692 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2693 vfta |= (1 << (vid & 0x1F));
2694 hw->mac.ops.write_vfta(hw, index, vfta);
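/* e.g. vid 100 maps to VFTA register 3 (100 >> 5), bit 4 (100 & 0x1f);
 * 128 32-bit VFTA registers cover the full 4096 VLAN ID space.
 */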
2697 set_bit(vid, adapter->active_vlans);
2702 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2703 __always_unused __be16 proto, u16 vid)
2705 struct e1000_adapter *adapter = netdev_priv(netdev);
2706 struct e1000_hw *hw = &adapter->hw;
2709 if ((adapter->hw.mng_cookie.status &
2710 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2711 (vid == adapter->mng_vlan_id)) {
2712 /* release control to f/w */
2713 e1000e_release_hw_control(adapter);
2717 /* remove VID from filter table */
2718 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2719 index = (vid >> 5) & 0x7F;
2720 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2721 vfta &= ~(1 << (vid & 0x1F));
2722 hw->mac.ops.write_vfta(hw, index, vfta);
2725 clear_bit(vid, adapter->active_vlans);
2731 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2732 * @adapter: board private structure to initialize
2734 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2736 struct net_device *netdev = adapter->netdev;
2737 struct e1000_hw *hw = &adapter->hw;
2740 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2741 /* disable VLAN receive filtering */
2742 rctl = er32(RCTL);
2743 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2744 ew32(RCTL, rctl);
2746 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2747 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2748 adapter->mng_vlan_id);
2749 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2755 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2756 * @adapter: board private structure to initialize
2758 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2760 struct e1000_hw *hw = &adapter->hw;
2763 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2764 /* enable VLAN receive filtering */
2765 rctl = er32(RCTL);
2766 rctl |= E1000_RCTL_VFE;
2767 rctl &= ~E1000_RCTL_CFIEN;
2768 ew32(RCTL, rctl);
2773 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2774 * @adapter: board private structure to initialize
2776 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2778 struct e1000_hw *hw = &adapter->hw;
2781 /* disable VLAN tag insert/strip */
2782 ctrl = er32(CTRL);
2783 ctrl &= ~E1000_CTRL_VME;
2784 ew32(CTRL, ctrl);
2788 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2789 * @adapter: board private structure to initialize
2791 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2793 struct e1000_hw *hw = &adapter->hw;
2796 /* enable VLAN tag insert/strip */
2797 ctrl = er32(CTRL);
2798 ctrl |= E1000_CTRL_VME;
2799 ew32(CTRL, ctrl);
2802 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2804 struct net_device *netdev = adapter->netdev;
2805 u16 vid = adapter->hw.mng_cookie.vlan_id;
2806 u16 old_vid = adapter->mng_vlan_id;
2808 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2809 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2810 adapter->mng_vlan_id = vid;
2813 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2814 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2817 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2821 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2823 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2824 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2827 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2829 struct e1000_hw *hw = &adapter->hw;
2830 u32 manc, manc2h, mdef, i, j;
2832 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2837 /* enable receiving management packets to the host. this will probably
2838 * generate destination unreachable messages from the host OS, but
2839 * the packets will be handled on SMBUS
2841 manc |= E1000_MANC_EN_MNG2HOST;
2842 manc2h = er32(MANC2H);
2844 switch (hw->mac.type) {
2846 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2850 /* Check if IPMI pass-through decision filter already exists;
2851 * if so, enable it.
2853 for (i = 0, j = 0; i < 8; i++) {
2854 mdef = er32(MDEF(i));
2856 /* Ignore filters with anything other than IPMI ports */
2857 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2860 /* Enable this decision filter in MANC2H */
2867 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2870 /* Create new decision filter in an empty filter */
2871 for (i = 0, j = 0; i < 8; i++)
2872 if (er32(MDEF(i)) == 0) {
2873 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2874 E1000_MDEF_PORT_664));
2881 e_warn("Unable to create IPMI pass-through filter\n");
2885 ew32(MANC2H, manc2h);
2890 * e1000_configure_tx - Configure Transmit Unit after Reset
2891 * @adapter: board private structure
2893 * Configure the Tx unit of the MAC after a reset.
2895 static void e1000_configure_tx(struct e1000_adapter *adapter)
2897 struct e1000_hw *hw = &adapter->hw;
2898 struct e1000_ring *tx_ring = adapter->tx_ring;
2902 /* Setup the HW Tx Head and Tail descriptor pointers */
2903 tdba = tx_ring->dma;
2904 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2905 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2906 ew32(TDBAH(0), (tdba >> 32));
2907 ew32(TDLEN(0), tdlen);
2910 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2911 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2913 /* Set the Tx Interrupt Delay register */
2914 ew32(TIDV, adapter->tx_int_delay);
2915 /* Tx irq moderation */
2916 ew32(TADV, adapter->tx_abs_int_delay);
2918 if (adapter->flags2 & FLAG2_DMA_BURST) {
2919 u32 txdctl = er32(TXDCTL(0));
2920 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2921 E1000_TXDCTL_WTHRESH);
2922 /* set up some performance related parameters to encourage the
2923 * hardware to use the bus more efficiently in bursts; this depends
2924 * on tx_int_delay being enabled:
2925 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2926 * hthresh = 1 ==> prefetch when one or more available
2927 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2928 * BEWARE: this seems to work but should be considered first if
2929 * there are Tx hangs or other Tx related bugs
2931 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2932 ew32(TXDCTL(0), txdctl);
2934 /* erratum workaround: set txdctl the same for both queues */
2935 ew32(TXDCTL(1), er32(TXDCTL(0)));
2937 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2938 tarc = er32(TARC(0));
2939 /* set the speed mode bit, we'll clear it if we're not at
2940 * gigabit link later
2942 #define SPEED_MODE_BIT (1 << 21)
2943 tarc |= SPEED_MODE_BIT;
2944 ew32(TARC(0), tarc);
2947 /* errata: program both queues to unweighted RR */
2948 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2949 tarc = er32(TARC(0));
2951 ew32(TARC(0), tarc);
2952 tarc = er32(TARC(1));
2954 ew32(TARC(1), tarc);
2957 /* Setup Transmit Descriptor Settings for eop descriptor */
2958 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2960 /* only set IDE if we are delaying interrupts using the timers */
2961 if (adapter->tx_int_delay)
2962 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2964 /* enable Report Status bit */
2965 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2967 hw->mac.ops.config_collision_dist(hw);
2971 * e1000_setup_rctl - configure the receive control registers
2972 * @adapter: Board private structure
2974 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2975 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
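/* e.g. with 4 KiB pages, PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3,
 * so a 9000-byte jumbo MTU still passes the pages <= 3 packet-split
 * check below.
 */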
2976 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2978 struct e1000_hw *hw = &adapter->hw;
2982 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2983 if ((hw->mac.type >= e1000_pch2lan) &&
2984 (adapter->netdev->mtu > ETH_DATA_LEN) &&
2985 e1000_lv_jumbo_workaround_ich8lan(hw, true))
2986 e_dbg("failed to enable jumbo frame workaround mode\n");
2988 /* Program MC offset vector base */
2989 rctl = er32(RCTL);
2990 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2991 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2992 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2993 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2995 /* Do not Store bad packets */
2996 rctl &= ~E1000_RCTL_SBP;
2998 /* Enable Long Packet receive */
2999 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3000 rctl &= ~E1000_RCTL_LPE;
3001 else
3002 rctl |= E1000_RCTL_LPE;
3004 /* Some systems expect that the CRC is included in SMBUS traffic. The
3005 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3006 * host memory when this is enabled
3008 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3009 rctl |= E1000_RCTL_SECRC;
3011 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3012 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3015 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3017 phy_data |= (1 << 2);
3018 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3020 e1e_rphy(hw, 22, &phy_data);
3022 phy_data |= (1 << 14);
3023 e1e_wphy(hw, 0x10, 0x2823);
3024 e1e_wphy(hw, 0x11, 0x0003);
3025 e1e_wphy(hw, 22, phy_data);
3028 /* Setup buffer sizes */
3029 rctl &= ~E1000_RCTL_SZ_4096;
3030 rctl |= E1000_RCTL_BSEX;
3031 switch (adapter->rx_buffer_len) {
3034 rctl |= E1000_RCTL_SZ_2048;
3035 rctl &= ~E1000_RCTL_BSEX;
3038 rctl |= E1000_RCTL_SZ_4096;
3041 rctl |= E1000_RCTL_SZ_8192;
3044 rctl |= E1000_RCTL_SZ_16384;
3048 /* Enable Extended Status in all Receive Descriptors */
3049 rfctl = er32(RFCTL);
3050 rfctl |= E1000_RFCTL_EXTEN;
3053 /* 82571 and greater support packet-split where the protocol
3054 * header is placed in skb->data and the packet data is
3055 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3056 * In the case of a non-split, skb->data is linearly filled,
3057 * followed by the page buffers. Therefore, skb->data is
3058 * sized to hold the largest protocol header.
3060 * allocations using alloc_page take too long for regular MTU
3061 * so only enable packet split for jumbo frames
3063 * Using pages when the page size is greater than 16k wastes
3064 * a lot of memory, since we allocate 3 pages at all times
3067 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3068 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3069 adapter->rx_ps_pages = pages;
3070 else
3071 adapter->rx_ps_pages = 0;
3073 if (adapter->rx_ps_pages) {
3076 /* Enable Packet split descriptors */
3077 rctl |= E1000_RCTL_DTYP_PS;
3079 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3081 switch (adapter->rx_ps_pages) {
3083 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3086 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3089 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3093 ew32(PSRCTL, psrctl);
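/* The missing breaks in the switch above are intentional fall-throughs:
 * rx_ps_pages == 3 programs buffer sizes 3, 2 and 1, so the protocol
 * header lands in the bsize0 buffer and the payload is scattered across
 * up to three pages.
 */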
3096 /* This is useful for sniffing bad packets. */
3097 if (adapter->netdev->features & NETIF_F_RXALL) {
3098 /* UPE and MPE will be handled by normal PROMISC logic
3099 * in e1000e_set_rx_mode
3101 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3102 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3103 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3105 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3106 E1000_RCTL_DPF | /* Allow filtered pause */
3107 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3108 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3109 * and that breaks VLANs.
3113 ew32(RCTL, rctl);
3114 /* just started the receive unit, no need to restart */
3115 adapter->flags &= ~FLAG_RESTART_NOW;
3119 * e1000_configure_rx - Configure Receive Unit after Reset
3120 * @adapter: board private structure
3122 * Configure the Rx unit of the MAC after a reset.
3124 static void e1000_configure_rx(struct e1000_adapter *adapter)
3126 struct e1000_hw *hw = &adapter->hw;
3127 struct e1000_ring *rx_ring = adapter->rx_ring;
3129 u32 rdlen, rctl, rxcsum, ctrl_ext;
3131 if (adapter->rx_ps_pages) {
3132 /* this is a 32 byte descriptor */
3133 rdlen = rx_ring->count *
3134 sizeof(union e1000_rx_desc_packet_split);
3135 adapter->clean_rx = e1000_clean_rx_irq_ps;
3136 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3137 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3138 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3139 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3140 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3142 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3143 adapter->clean_rx = e1000_clean_rx_irq;
3144 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3147 /* disable receives while setting up the descriptors */
3148 rctl = er32(RCTL);
3149 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3150 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3152 usleep_range(10000, 20000);
3154 if (adapter->flags2 & FLAG2_DMA_BURST) {
3155 /* set the writeback threshold (only takes effect if the RDTR
3156 * is set). set GRAN=1 and write back up to 0x4 worth, and
3157 * enable prefetching of 0x20 Rx descriptors
3163 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3164 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3166 /* override the delay timers for enabling bursting, only if
3167 * the value was not set by the user via module options
3169 if (adapter->rx_int_delay == DEFAULT_RDTR)
3170 adapter->rx_int_delay = BURST_RDTR;
3171 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3172 adapter->rx_abs_int_delay = BURST_RADV;
3175 /* set the Receive Delay Timer Register */
3176 ew32(RDTR, adapter->rx_int_delay);
3178 /* irq moderation */
3179 ew32(RADV, adapter->rx_abs_int_delay);
3180 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3181 e1000e_write_itr(adapter, adapter->itr);
3183 ctrl_ext = er32(CTRL_EXT);
3184 /* Auto-Mask interrupts upon ICR access */
3185 ctrl_ext |= E1000_CTRL_EXT_IAME;
3186 ew32(IAM, 0xffffffff);
3187 ew32(CTRL_EXT, ctrl_ext);
3190 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3191 * the Base and Length of the Rx Descriptor Ring
3193 rdba = rx_ring->dma;
3194 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3195 ew32(RDBAH(0), (rdba >> 32));
3196 ew32(RDLEN(0), rdlen);
3199 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3200 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3202 /* Enable Receive Checksum Offload for TCP and UDP */
3203 rxcsum = er32(RXCSUM);
3204 if (adapter->netdev->features & NETIF_F_RXCSUM)
3205 rxcsum |= E1000_RXCSUM_TUOFL;
3206 else
3207 rxcsum &= ~E1000_RXCSUM_TUOFL;
3208 ew32(RXCSUM, rxcsum);
3210 /* With jumbo frames, excessive C-state transition latencies result
3211 * in dropped transactions.
3213 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3214 u32 lat =
3215 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3216 adapter->max_frame_size) * 8 / 1000;
3218 if (adapter->flags & FLAG_IS_ICH) {
3219 u32 rxdctl = er32(RXDCTL(0));
3220 ew32(RXDCTL(0), rxdctl | 0x3);
3223 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3224 } else {
3225 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3226 PM_QOS_DEFAULT_VALUE);
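/* Units note: lat is bytes * 8 / 1000, i.e. bits at gigabit wire speed
 * (1 ns/bit) expressed in microseconds; e.g. 20 KB of spare RXA and a
 * 9018-byte frame give (20 * 1024 - 9018) * 8 / 1000 = 91 us of allowed
 * wakeup latency.
 */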
3229 /* Enable Receives */
3234 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3235 * @netdev: network interface device structure
3237 * Writes multicast address list to the MTA hash table.
3238 * Returns: -ENOMEM on failure
3239 * 0 on no addresses written
3240 * X on writing X addresses to MTA
3242 static int e1000e_write_mc_addr_list(struct net_device *netdev)
3244 struct e1000_adapter *adapter = netdev_priv(netdev);
3245 struct e1000_hw *hw = &adapter->hw;
3246 struct netdev_hw_addr *ha;
3250 if (netdev_mc_empty(netdev)) {
3251 /* nothing to program, so clear mc list */
3252 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3256 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3260 /* update_mc_addr_list expects a packed array of only addresses. */
3262 netdev_for_each_mc_addr(ha, netdev)
3263 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3265 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3268 return netdev_mc_count(netdev);
3272 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3273 * @netdev: network interface device structure
3275 * Writes unicast address list to the RAR table.
3276 * Returns: -ENOMEM on failure/insufficient address space
3277 * 0 on no addresses written
3278 * X on writing X addresses to the RAR table
3280 static int e1000e_write_uc_addr_list(struct net_device *netdev)
3282 struct e1000_adapter *adapter = netdev_priv(netdev);
3283 struct e1000_hw *hw = &adapter->hw;
3284 unsigned int rar_entries = hw->mac.rar_entry_count;
3287 /* save a rar entry for our hardware address */
3288 rar_entries--;
3290 /* save a rar entry for the LAA workaround */
3291 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3292 rar_entries--;
3294 /* return ENOMEM indicating insufficient memory for addresses */
3295 if (netdev_uc_count(netdev) > rar_entries)
3298 if (!netdev_uc_empty(netdev) && rar_entries) {
3299 struct netdev_hw_addr *ha;
3301 /* write the addresses in reverse order to avoid write
3304 netdev_for_each_uc_addr(ha, netdev) {
3307 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3312 /* zero out the remaining RAR entries not used above */
3313 for (; rar_entries > 0; rar_entries--) {
3314 ew32(RAH(rar_entries), 0);
3315 ew32(RAL(rar_entries), 0);
3323 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3324 * @netdev: network interface device structure
3326 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3327 * address list or the network interface flags are updated. This routine is
3328 * responsible for configuring the hardware for proper unicast, multicast,
3329 * promiscuous mode, and all-multi behavior.
3331 static void e1000e_set_rx_mode(struct net_device *netdev)
3333 struct e1000_adapter *adapter = netdev_priv(netdev);
3334 struct e1000_hw *hw = &adapter->hw;
3337 /* Check for Promiscuous and All Multicast modes */
3338 rctl = er32(RCTL);
3340 /* clear the affected bits */
3341 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3343 if (netdev->flags & IFF_PROMISC) {
3344 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3345 /* Do not hardware filter VLANs in promisc mode */
3346 e1000e_vlan_filter_disable(adapter);
3350 if (netdev->flags & IFF_ALLMULTI) {
3351 rctl |= E1000_RCTL_MPE;
3353 /* Write addresses to the MTA, if the attempt fails
3354 * then we should just turn on promiscuous mode so
3355 * that we can at least receive multicast traffic
3357 count = e1000e_write_mc_addr_list(netdev);
3358 if (count < 0)
3359 rctl |= E1000_RCTL_MPE;
3361 e1000e_vlan_filter_enable(adapter);
3362 /* Write addresses to available RAR registers, if there is not
3363 * sufficient space to store all the addresses then enable
3364 * unicast promiscuous mode
3366 count = e1000e_write_uc_addr_list(netdev);
3367 if (count < 0)
3368 rctl |= E1000_RCTL_UPE;
3371 ew32(RCTL, rctl);
3373 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3374 e1000e_vlan_strip_enable(adapter);
3376 e1000e_vlan_strip_disable(adapter);
3379 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3381 struct e1000_hw *hw = &adapter->hw;
3384 static const u32 rsskey[10] = {
3385 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3386 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3389 /* Fill out hash function seed */
3390 for (i = 0; i < 10; i++)
3391 ew32(RSSRK(i), rsskey[i]);
3393 /* Direct all traffic to queue 0 */
3394 for (i = 0; i < 32; i++)
3395 ew32(RETA(i), 0);
3397 /* Disable raw packet checksumming so that RSS hash is placed in
3398 * descriptor on writeback.
3400 rxcsum = er32(RXCSUM);
3401 rxcsum |= E1000_RXCSUM_PCSD;
3403 ew32(RXCSUM, rxcsum);
3405 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3406 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3407 E1000_MRQC_RSS_FIELD_IPV6 |
3408 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3409 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3411 ew32(MRQC, mrqc);
3415 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3416 * @adapter: board private structure
3417 * @timinca: pointer to returned time increment attributes
3419 * Get attributes for incrementing the System Time Register SYSTIML/H at
3420 * the default base frequency, and set the cyclecounter shift value.
3422 s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3424 struct e1000_hw *hw = &adapter->hw;
3425 u32 incvalue, incperiod, shift;
3427 /* Make sure clock is enabled on I217 before checking the frequency */
3428 if ((hw->mac.type == e1000_pch_lpt) &&
3429 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3430 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3431 u32 fextnvm7 = er32(FEXTNVM7);
3433 if (!(fextnvm7 & (1 << 0))) {
3434 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3439 switch (hw->mac.type) {
3442 /* On I217, the clock frequency is 25MHz or 96MHz as
3443 * indicated by the System Clock Frequency Indication
3445 if ((hw->mac.type != e1000_pch_lpt) ||
3446 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3447 /* Stable 96MHz frequency */
3448 incperiod = INCPERIOD_96MHz;
3449 incvalue = INCVALUE_96MHz;
3450 shift = INCVALUE_SHIFT_96MHz;
3451 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3457 /* Stable 25MHz frequency */
3458 incperiod = INCPERIOD_25MHz;
3459 incvalue = INCVALUE_25MHz;
3460 shift = INCVALUE_SHIFT_25MHz;
3461 adapter->cc.shift = shift;
3467 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3468 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
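/* Sanity check on the encoding: at the stable 25 MHz frequency one
 * clock period is 1000000000 / 25000000 = 40 ns, so the
 * incvalue/incperiod/shift triple above is a fixed-point encoding of
 * 40 ns of time per SYSTIM tick.
 */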
3474 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3475 * @adapter: board private structure
3477 * Outgoing time stamping can be enabled and disabled. Play nice and
3478 * disable it when requested, although it shouldn't cause any overhead
3479 * when no packet needs it. At most one packet in the queue may be
3480 * marked for time stamping, otherwise it would be impossible to tell
3481 * for sure to which packet the hardware time stamp belongs.
3483 * Incoming time stamping has to be configured via the hardware filters.
3484 * Not all combinations are supported, in particular event type has to be
3485 * specified. Matching the kind of event packet is not supported, with the
3486 * exception of "all V2 events regardless of level 2 or 4".
3488 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3489 struct hwtstamp_config *config)
3491 struct e1000_hw *hw = &adapter->hw;
3492 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3493 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3501 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3504 /* flags reserved for future extensions - must be zero */
3508 switch (config->tx_type) {
3509 case HWTSTAMP_TX_OFF:
3512 case HWTSTAMP_TX_ON:
3518 switch (config->rx_filter) {
3519 case HWTSTAMP_FILTER_NONE:
3522 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3523 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3524 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3527 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3528 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3529 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3532 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3533 /* Also time stamps V2 L2 Path Delay Request/Response */
3534 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3535 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3538 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3539 /* Also time stamps V2 L2 Path Delay Request/Response. */
3540 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3541 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3544 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3545 /* Hardware cannot filter just V2 L4 Sync messages;
3546 * fall-through to V2 (both L2 and L4) Sync.
3548 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3549 /* Also time stamps V2 Path Delay Request/Response. */
3550 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3551 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3555 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3556 /* Hardware cannot filter just V2 L4 Delay Request messages;
3557 * fall-through to V2 (both L2 and L4) Delay Request.
3559 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3560 /* Also time stamps V2 Path Delay Request/Response. */
3561 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3562 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3566 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3567 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3568 /* Hardware cannot filter just V2 L4 or L2 Event messages;
3569 * fall-through to all V2 (both L2 and L4) Events.
3571 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3572 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3573 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3577 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3578 /* For V1, the hardware can only filter Sync messages or
3579 * Delay Request messages but not both so fall-through to
3580 * time stamp all packets.
3582 case HWTSTAMP_FILTER_ALL:
3585 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3586 config->rx_filter = HWTSTAMP_FILTER_ALL;
3592 adapter->hwtstamp_config = *config;
3594 /* enable/disable Tx h/w time stamping */
3595 regval = er32(TSYNCTXCTL);
3596 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3597 regval |= tsync_tx_ctl;
3598 ew32(TSYNCTXCTL, regval);
3599 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3600 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3601 e_err("Timesync Tx Control register not set as expected\n");
3605 /* enable/disable Rx h/w time stamping */
3606 regval = er32(TSYNCRXCTL);
3607 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3608 regval |= tsync_rx_ctl;
3609 ew32(TSYNCRXCTL, regval);
3610 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3611 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3612 (regval & (E1000_TSYNCRXCTL_ENABLED |
3613 E1000_TSYNCRXCTL_TYPE_MASK))) {
3614 e_err("Timesync Rx Control register not set as expected\n");
3618 /* L2: define ethertype filter for time stamped packets */
3619 if (is_l2)
3620 rxmtrl |= ETH_P_1588;
3622 /* define which PTP packets get time stamped */
3623 ew32(RXMTRL, rxmtrl);
3625 /* Filter by destination port */
3627 rxudp = PTP_EV_PORT;
3628 cpu_to_be16s(&rxudp);
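/* PTP event messages (Sync, Delay_Req and the peer delay messages) use
 * UDP port 319; it is converted to big-endian so the hardware can match
 * it against the wire format directly.
 */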
3634 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
3638 /* Get and set the System Time Register SYSTIM base frequency */
3639 ret_val = e1000e_get_base_timinca(adapter, &regval);
3642 ew32(TIMINCA, regval);
3644 /* reset the ns time counter */
3645 timecounter_init(&adapter->tc, &adapter->cc,
3646 ktime_to_ns(ktime_get_real()));
3652 * e1000_configure - configure the hardware for Rx and Tx
3653 * @adapter: private board structure
3655 static void e1000_configure(struct e1000_adapter *adapter)
3657 struct e1000_ring *rx_ring = adapter->rx_ring;
3659 e1000e_set_rx_mode(adapter->netdev);
3661 e1000_restore_vlan(adapter);
3662 e1000_init_manageability_pt(adapter);
3664 e1000_configure_tx(adapter);
3666 if (adapter->netdev->features & NETIF_F_RXHASH)
3667 e1000e_setup_rss_hash(adapter);
3668 e1000_setup_rctl(adapter);
3669 e1000_configure_rx(adapter);
3670 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3674 * e1000e_power_up_phy - restore link in case the phy was powered down
3675 * @adapter: address of board private structure
3677 * The phy may be powered down to save power and turn off link when the
3678 * driver is unloaded and wake on lan is not enabled (among others)
3679 * *** this routine MUST be followed by a call to e1000e_reset ***
3681 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3683 if (adapter->hw.phy.ops.power_up)
3684 adapter->hw.phy.ops.power_up(&adapter->hw);
3686 adapter->hw.mac.ops.setup_link(&adapter->hw);
3690 * e1000_power_down_phy - Power down the PHY
3692 * Power down the PHY so no link is implied when interface is down.
3693 * The PHY cannot be powered down if management or WoL is active.
3695 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3697 /* WoL is enabled */
3698 if (adapter->wol)
3699 return;
3701 if (adapter->hw.phy.ops.power_down)
3702 adapter->hw.phy.ops.power_down(&adapter->hw);
3706 * e1000e_reset - bring the hardware into a known good state
3708 * This function boots the hardware and enables some settings that
3709 * require a configuration cycle of the hardware - those cannot be
3710 * set/changed during runtime. After reset the device needs to be
3711 * properly configured for Rx, Tx etc.
3713 void e1000e_reset(struct e1000_adapter *adapter)
3715 struct e1000_mac_info *mac = &adapter->hw.mac;
3716 struct e1000_fc_info *fc = &adapter->hw.fc;
3717 struct e1000_hw *hw = &adapter->hw;
3718 u32 tx_space, min_tx_space, min_rx_space;
3719 u32 pba = adapter->pba;
3722 /* reset Packet Buffer Allocation to default */
3725 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3726 /* To maintain wire speed transmits, the Tx FIFO should be
3727 * large enough to accommodate two full transmit packets,
3728 * rounded up to the next 1KB and expressed in KB. Likewise,
3729 * the Rx FIFO should be large enough to accommodate at least
3730 * one full receive packet and is similarly rounded up and
3734 /* upper 16 bits has Tx packet buffer allocation size in KB */
3735 tx_space = pba >> 16;
3736 /* lower 16 bits has Rx packet buffer allocation size in KB */
3738 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
3739 * but we don't include the Ethernet FCS because hardware appends it
3741 min_tx_space = (adapter->max_frame_size +
3742 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3743 min_tx_space = ALIGN(min_tx_space, 1024);
3744 min_tx_space >>= 10;
3745 /* software strips receive CRC, so leave room for it */
3746 min_rx_space = adapter->max_frame_size;
3747 min_rx_space = ALIGN(min_rx_space, 1024);
3748 min_rx_space >>= 10;
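/* Worked example for a 9018-byte jumbo frame (9000 MTU + header + FCS):
 * min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB) and
 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB).
 */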
3750 /* If current Tx allocation is less than the min Tx FIFO size,
3751 * and the min Tx FIFO size is less than the current Rx FIFO
3752 * allocation, take space away from current Rx allocation
3754 if ((tx_space < min_tx_space) &&
3755 ((min_tx_space - tx_space) < pba)) {
3756 pba -= min_tx_space - tx_space;
3758 /* if short on Rx space, Rx wins and must trump Tx
3759 * adjustment
3761 if (pba < min_rx_space)
3768 /* flow control settings
3770 * The high water mark must be low enough to fit one full frame
3771 * (or the size used for early receive) above it in the Rx FIFO.
3772 * Set it to the lower of:
3773 * - 90% of the Rx FIFO size, and
3774 * - the full Rx FIFO size minus one full frame
3776 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3777 fc->pause_time = 0xFFFF;
3778 else
3779 fc->pause_time = E1000_FC_PAUSE_TIME;
3780 fc->send_xon = true;
3781 fc->current_mode = fc->requested_mode;
3783 switch (hw->mac.type) {
3785 case e1000_ich10lan:
3786 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3789 fc->high_water = 0x2800;
3790 fc->low_water = fc->high_water - 8;
3795 hwm = min(((pba << 10) * 9 / 10),
3796 ((pba << 10) - adapter->max_frame_size));
3798 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3799 fc->low_water = fc->high_water - 8;
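/* Illustrative numbers: with pba = 14 (KB) and a 1522-byte frame the
 * formula above gives hwm = min(14336 * 9 / 10, 14336 - 1522) = 12814,
 * and the E1000_FCRTH_RTH mask rounds that down to 8-byte granularity.
 */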
3802 /* Workaround PCH LOM adapter hangs with certain network
3803 * loads. If hangs persist, try disabling Tx flow control.
3805 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3806 fc->high_water = 0x3500;
3807 fc->low_water = 0x1500;
3808 } else {
3809 fc->high_water = 0x5000;
3810 fc->low_water = 0x3000;
3812 fc->refresh_time = 0x1000;
3816 fc->refresh_time = 0x0400;
3818 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3819 fc->high_water = 0x05C20;
3820 fc->low_water = 0x05048;
3821 fc->pause_time = 0x0650;
3827 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3828 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
3832 /* Alignment of Tx data is on an arbitrary byte boundary with the
3833 * maximum size per Tx descriptor limited only to the transmit
3834 * allocation of the packet buffer minus 96 bytes with an upper
3835 * limit of 24KB due to receive synchronization limitations.
3837 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3840 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
3841 * fit in receive buffer.
3843 if (adapter->itr_setting & 0x3) {
3844 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3845 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3846 dev_info(&adapter->pdev->dev,
3847 "Interrupt Throttle Rate off\n");
3848 adapter->flags2 |= FLAG2_DISABLE_AIM;
3849 e1000e_write_itr(adapter, 0);
3851 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3852 dev_info(&adapter->pdev->dev,
3853 "Interrupt Throttle Rate on\n");
3854 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3855 adapter->itr = 20000;
3856 e1000e_write_itr(adapter, adapter->itr);
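/* e.g. two 9018-byte jumbo frames need 18036 bytes, so with a
 * hypothetical 10 KB Rx PBA (10240 bytes) AIM is switched off, while a
 * 20 KB PBA (20480 bytes) would leave it on.
 */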
3860 /* Allow time for pending master requests to run */
3861 mac->ops.reset_hw(hw);
3863 /* For parts with AMT enabled, let the firmware know
3864 * that the network interface is in control
3866 if (adapter->flags & FLAG_HAS_AMT)
3867 e1000e_get_hw_control(adapter);
3871 if (mac->ops.init_hw(hw))
3872 e_err("Hardware Error\n");
3874 e1000_update_mng_vlan(adapter);
3876 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3877 ew32(VET, ETH_P_8021Q);
3879 e1000e_reset_adaptive(hw);
3881 /* initialize systim and reset the ns time counter */
3882 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
3884 /* Set EEE advertisement as appropriate */
3885 if (adapter->flags2 & FLAG2_HAS_EEE) {
3889 switch (hw->phy.type) {
3890 case e1000_phy_82579:
3891 adv_addr = I82579_EEE_ADVERTISEMENT;
3893 case e1000_phy_i217:
3894 adv_addr = I217_EEE_ADVERTISEMENT;
3897 dev_err(&adapter->pdev->dev,
3898 "Invalid PHY type setting EEE advertisement\n");
3902 ret_val = hw->phy.ops.acquire(hw);
3904 dev_err(&adapter->pdev->dev,
3905 "EEE advertisement - unable to acquire PHY\n");
3909 e1000_write_emi_reg_locked(hw, adv_addr,
3910 hw->dev_spec.ich8lan.eee_disable ?
3911 0 : adapter->eee_advert);
3913 hw->phy.ops.release(hw);
3916 if (!netif_running(adapter->netdev) &&
3917 !test_bit(__E1000_TESTING, &adapter->state)) {
3918 e1000_power_down_phy(adapter);
3922 e1000_get_phy_info(hw);
3924 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3925 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3927 /* speed up time to link by disabling smart power down, ignore
3928 * the return value of this function because there is nothing
3929 * different we would do if it failed
3931 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3932 phy_data &= ~IGP02E1000_PM_SPD;
3933 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3937 int e1000e_up(struct e1000_adapter *adapter)
3939 struct e1000_hw *hw = &adapter->hw;
3941 /* hardware has been reset, we need to reload some things */
3942 e1000_configure(adapter);
3944 clear_bit(__E1000_DOWN, &adapter->state);
3946 if (adapter->msix_entries)
3947 e1000_configure_msix(adapter);
3948 e1000_irq_enable(adapter);
3950 netif_start_queue(adapter->netdev);
3952 /* fire a link change interrupt to start the watchdog */
3953 if (adapter->msix_entries)
3954 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3956 ew32(ICS, E1000_ICS_LSC);
3961 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3963 struct e1000_hw *hw = &adapter->hw;
3965 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3968 /* flush pending descriptor writebacks to memory */
3969 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3970 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3972 /* execute the writes immediately */
3975 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
3976 * write is successful
3978 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3979 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3981 /* execute the writes immediately */
3985 static void e1000e_update_stats(struct e1000_adapter *adapter);
3987 void e1000e_down(struct e1000_adapter *adapter)
3989 struct net_device *netdev = adapter->netdev;
3990 struct e1000_hw *hw = &adapter->hw;
3993 /* signal that we're down so the interrupt handler does not
3994 * reschedule our watchdog timer
3996 set_bit(__E1000_DOWN, &adapter->state);
3998 /* disable receives in the hardware */
3999 rctl = er32(RCTL);
4000 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4001 ew32(RCTL, rctl & ~E1000_RCTL_EN);
4002 /* flush and sleep below */
4004 netif_stop_queue(netdev);
4006 /* disable transmits in the hardware */
4007 tctl = er32(TCTL);
4008 tctl &= ~E1000_TCTL_EN;
4009 ew32(TCTL, tctl);
4011 /* flush both disables and wait for them to finish */
4013 usleep_range(10000, 20000);
4015 e1000_irq_disable(adapter);
4017 napi_synchronize(&adapter->napi);
4019 del_timer_sync(&adapter->watchdog_timer);
4020 del_timer_sync(&adapter->phy_info_timer);
4022 netif_carrier_off(netdev);
4024 spin_lock(&adapter->stats64_lock);
4025 e1000e_update_stats(adapter);
4026 spin_unlock(&adapter->stats64_lock);
4028 e1000e_flush_descriptors(adapter);
4029 e1000_clean_tx_ring(adapter->tx_ring);
4030 e1000_clean_rx_ring(adapter->rx_ring);
4032 adapter->link_speed = 0;
4033 adapter->link_duplex = 0;
4035 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4036 if ((hw->mac.type >= e1000_pch2lan) &&
4037 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4038 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4039 e_dbg("failed to disable jumbo frame workaround mode\n");
4041 if (!pci_channel_offline(adapter->pdev))
4042 e1000e_reset(adapter);
4044 /* TODO: for power management, we could drop the link and
4045 * pci_disable_device here.
4049 void e1000e_reinit_locked(struct e1000_adapter *adapter)
4052 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4053 usleep_range(1000, 2000);
4054 e1000e_down(adapter);
4056 clear_bit(__E1000_RESETTING, &adapter->state);
4060 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4061 * @cc: cyclecounter structure
4063 static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4065 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4067 struct e1000_hw *hw = &adapter->hw;
4070 /* latch SYSTIMH on read of SYSTIML */
4071 systim = (cycle_t)er32(SYSTIML);
4072 systim |= (cycle_t)er32(SYSTIMH) << 32;
4078 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4079 * @adapter: board private structure to initialize
4081 * e1000_sw_init initializes the Adapter private data structure.
4082 * Fields are initialized based on PCI device information and
4083 * OS network device settings (MTU size).
4085 static int e1000_sw_init(struct e1000_adapter *adapter)
4087 struct net_device *netdev = adapter->netdev;
4089 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4090 adapter->rx_ps_bsize0 = 128;
4091 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4092 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4093 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4094 adapter->rx_ring_count = E1000_DEFAULT_RXD;
4096 spin_lock_init(&adapter->stats64_lock);
4098 e1000e_set_interrupt_capability(adapter);
4100 if (e1000_alloc_queues(adapter))
4103 /* Setup hardware time stamping cyclecounter */
4104 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4105 adapter->cc.read = e1000e_cyclecounter_read;
4106 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4107 adapter->cc.mult = 1;
4108 /* cc.shift set in e1000e_get_base_timinca() */
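/* The timecounter core converts raw cycles with
 * ns = (cycles * cc.mult) >> cc.shift, so with mult fixed at 1 the
 * shift derived from the SYSTIM base frequency does all the scaling.
 */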
4110 spin_lock_init(&adapter->systim_lock);
4111 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4114 /* Explicitly disable IRQ since the NIC can be in any state. */
4115 e1000_irq_disable(adapter);
4117 set_bit(__E1000_DOWN, &adapter->state);
4122 * e1000_intr_msi_test - Interrupt Handler
4123 * @irq: interrupt number
4124 * @data: pointer to a network interface device structure
4126 static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4128 struct net_device *netdev = data;
4129 struct e1000_adapter *adapter = netdev_priv(netdev);
4130 struct e1000_hw *hw = &adapter->hw;
4131 u32 icr = er32(ICR);
4133 e_dbg("icr is %08X\n", icr);
4134 if (icr & E1000_ICR_RXSEQ) {
4135 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4136 /* Force memory writes to complete before acknowledging the
4137 * interrupt is handled.
4146 * e1000_test_msi_interrupt - Returns 0 for successful test
4147 * @adapter: board private struct
4149 * code flow taken from tg3.c
4151 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4153 struct net_device *netdev = adapter->netdev;
4154 struct e1000_hw *hw = &adapter->hw;
4157 /* poll_enable hasn't been called yet, so don't need disable */
4158 /* clear any pending events */
4161 /* free the real vector and request a test handler */
4162 e1000_free_irq(adapter);
4163 e1000e_reset_interrupt_capability(adapter);
4165 /* Assume that the test fails, if it succeeds then the test
4166 * MSI irq handler will unset this flag
4168 adapter->flags |= FLAG_MSI_TEST_FAILED;
4170 err = pci_enable_msi(adapter->pdev);
4172 goto msi_test_failed;
4174 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4175 netdev->name, netdev);
4177 pci_disable_msi(adapter->pdev);
4178 goto msi_test_failed;
4181 /* Force memory writes to complete before enabling and firing an
4186 e1000_irq_enable(adapter);
4188 /* fire an unusual interrupt on the test handler */
4189 ew32(ICS, E1000_ICS_RXSEQ);
4193 e1000_irq_disable(adapter);
4195 rmb(); /* read flags after interrupt has been fired */
4197 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4198 adapter->int_mode = E1000E_INT_MODE_LEGACY;
4199 e_info("MSI interrupt test failed, using legacy interrupt.\n");
4201 e_dbg("MSI interrupt test succeeded!\n");
4204 free_irq(adapter->pdev->irq, netdev);
4205 pci_disable_msi(adapter->pdev);
4208 e1000e_set_interrupt_capability(adapter);
4209 return e1000_request_irq(adapter);
4213 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4214 * @adapter: board private struct
4216 * code flow taken from tg3.c, called with e1000 interrupts disabled.
4218 static int e1000_test_msi(struct e1000_adapter *adapter)
4223 if (!(adapter->flags & FLAG_MSI_ENABLED))
4226 /* disable SERR in case the MSI write causes a master abort */
4227 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4228 if (pci_cmd & PCI_COMMAND_SERR)
4229 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4230 pci_cmd & ~PCI_COMMAND_SERR);
4232 err = e1000_test_msi_interrupt(adapter);
4234 /* re-enable SERR */
4235 if (pci_cmd & PCI_COMMAND_SERR) {
4236 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4237 pci_cmd |= PCI_COMMAND_SERR;
4238 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4245 * e1000_open - Called when a network interface is made active
4246 * @netdev: network interface device structure
4248 * Returns 0 on success, negative value on failure
4250 * The open entry point is called when a network interface is made
4251 * active by the system (IFF_UP). At this point all resources needed
4252 * for transmit and receive operations are allocated, the interrupt
4253 * handler is registered with the OS, the watchdog timer is started,
4254 * and the stack is notified that the interface is ready.
4256 static int e1000_open(struct net_device *netdev)
4258 struct e1000_adapter *adapter = netdev_priv(netdev);
4259 struct e1000_hw *hw = &adapter->hw;
4260 struct pci_dev *pdev = adapter->pdev;
4263 /* disallow open during test */
4264 if (test_bit(__E1000_TESTING, &adapter->state))
4267 pm_runtime_get_sync(&pdev->dev);
4269 netif_carrier_off(netdev);
4271 /* allocate transmit descriptors */
4272 err = e1000e_setup_tx_resources(adapter->tx_ring);
4276 /* allocate receive descriptors */
4277 err = e1000e_setup_rx_resources(adapter->rx_ring);
4281 /* If AMT is enabled, let the firmware know that the network
4282 * interface is now open and reset the part to a known state.
4284 if (adapter->flags & FLAG_HAS_AMT) {
4285 e1000e_get_hw_control(adapter);
4286 e1000e_reset(adapter);
4289 e1000e_power_up_phy(adapter);
4291 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4292 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4293 e1000_update_mng_vlan(adapter);
4295 /* DMA latency requirement to work around the jumbo-frame issue */
4296 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4297 PM_QOS_DEFAULT_VALUE);
4299 /* before we allocate an interrupt, we must be ready to handle it.
4300 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4301 * as soon as we call pci_request_irq, so we have to set up our
4302 * clean_rx handler before we do so.
4303 */
4304 e1000_configure(adapter);
4306 err = e1000_request_irq(adapter);
4310 /* Work around PCIe errata with MSI interrupts causing some chipsets to
4311 * ignore e1000e MSI messages, which means we need to test our MSI
4312 * interrupt now.
4313 */
4314 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4315 err = e1000_test_msi(adapter);
4316 if (err) {
4317 e_err("Interrupt allocation failed\n");
4318 goto err_req_irq;
4319 }
4322 /* From here on the code is the same as e1000e_up() */
4323 clear_bit(__E1000_DOWN, &adapter->state);
4325 napi_enable(&adapter->napi);
4327 e1000_irq_enable(adapter);
4329 adapter->tx_hang_recheck = false;
4330 netif_start_queue(netdev);
4332 adapter->idle_check = true;
4333 hw->mac.get_link_status = true;
4334 pm_runtime_put(&pdev->dev);
4336 /* fire a link status change interrupt to start the watchdog */
4337 if (adapter->msix_entries)
4338 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4339 else
4340 ew32(ICS, E1000_ICS_LSC);
4342 return 0;
4344 err_req_irq:
4345 e1000e_release_hw_control(adapter);
4346 e1000_power_down_phy(adapter);
4347 e1000e_free_rx_resources(adapter->rx_ring);
4349 e1000e_free_tx_resources(adapter->tx_ring);
4351 e1000e_reset(adapter);
4352 pm_runtime_put_sync(&pdev->dev);
4358 * e1000_close - Disables a network interface
4359 * @netdev: network interface device structure
4361 * Returns 0, this is not allowed to fail
4363 * The close entry point is called when an interface is de-activated
4364 * by the OS. The hardware is still under the driver's control, but
4365 * needs to be disabled. A global MAC reset is issued to stop the
4366 * hardware, and all transmit and receive resources are freed.
4368 static int e1000_close(struct net_device *netdev)
4370 struct e1000_adapter *adapter = netdev_priv(netdev);
4371 struct pci_dev *pdev = adapter->pdev;
4372 int count = E1000_CHECK_RESET_COUNT;
4374 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4375 usleep_range(10000, 20000);
4377 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4379 pm_runtime_get_sync(&pdev->dev);
4381 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4382 e1000e_down(adapter);
4383 e1000_free_irq(adapter);
4386 napi_disable(&adapter->napi);
4388 e1000_power_down_phy(adapter);
4390 e1000e_free_tx_resources(adapter->tx_ring);
4391 e1000e_free_rx_resources(adapter->rx_ring);
4393 /* kill manageability vlan ID if supported, but not if a vlan with
4394 * the same ID is registered on the host OS (let 8021q kill it)
4396 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4397 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4398 adapter->mng_vlan_id);
4400 /* If AMT is enabled, let the firmware know that the network
4401 * interface is now closed
4403 if ((adapter->flags & FLAG_HAS_AMT) &&
4404 !test_bit(__E1000_TESTING, &adapter->state))
4405 e1000e_release_hw_control(adapter);
4407 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4409 pm_runtime_put_sync(&pdev->dev);
4415 * e1000_set_mac - Change the Ethernet Address of the NIC
4416 * @netdev: network interface device structure
4417 * @p: pointer to an address structure
4419 * Returns 0 on success, negative on failure
4421 static int e1000_set_mac(struct net_device *netdev, void *p)
4423 struct e1000_adapter *adapter = netdev_priv(netdev);
4424 struct e1000_hw *hw = &adapter->hw;
4425 struct sockaddr *addr = p;
4427 if (!is_valid_ether_addr(addr->sa_data))
4428 return -EADDRNOTAVAIL;
4430 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4431 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4433 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4435 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4436 /* activate the work around */
4437 e1000e_set_laa_state_82571(&adapter->hw, 1);
4439 /* Hold a copy of the LAA in RAR[14]. This is done so that
4440 * between the time RAR[0] gets clobbered and the time it
4441 * gets fixed (in e1000_watchdog), the actual LAA is in one
4442 * of the RARs and no incoming packets directed to this port
4443 * are dropped. Eventually the LAA will be in RAR[0] and
4444 * RAR[14].
4445 */
4446 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4447 adapter->hw.mac.rar_entry_count - 1);
4448 }
4450 return 0;
4451 }
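/* Note: with the 15 receive address registers typical of 82571-class
 * parts (an assumed count), rar_entry_count - 1 places this backup copy
 * in RAR[14], matching the comment above.
 */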
4454 * e1000e_update_phy_task - work thread to update phy
4455 * @work: pointer to our work struct
4457 * this worker thread exists because we must acquire a
4458 * semaphore to read the phy; we may need to msleep while
4459 * waiting for it, and we can't msleep in a timer callback.
4461 static void e1000e_update_phy_task(struct work_struct *work)
4463 struct e1000_adapter *adapter = container_of(work,
4464 struct e1000_adapter,
4465 update_phy_task);
4466 struct e1000_hw *hw = &adapter->hw;
4468 if (test_bit(__E1000_DOWN, &adapter->state))
4471 e1000_get_phy_info(hw);
4473 /* Enable EEE on 82579 after link up */
4474 if (hw->phy.type == e1000_phy_82579)
4475 e1000_set_eee_pchlan(hw);
4479 * e1000_update_phy_info - timer call-back to update PHY info
4480 * @data: pointer to adapter cast into an unsigned long
4482 * Need to wait a few seconds after link up to get diagnostic information from
4483 * the phy
4484 */
4485 static void e1000_update_phy_info(unsigned long data)
4487 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4489 if (test_bit(__E1000_DOWN, &adapter->state))
4492 schedule_work(&adapter->update_phy_task);
4496 * e1000e_update_phy_stats - Update the PHY statistics counters
4497 * @adapter: board private structure
4499 * Read/clear the upper 16-bit PHY registers and read/accumulate the
4500 * lower 16-bit PHY registers.
4501 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4503 struct e1000_hw *hw = &adapter->hw;
4507 ret_val = hw->phy.ops.acquire(hw);
4511 /* A page set is expensive so check if already on desired page.
4512 * If not, set to the page with the PHY status registers.
4513 */
4514 hw->phy.addr = 1;
4515 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4516 &phy_data);
4517 if (ret_val)
4518 goto release;
4519 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4520 ret_val = hw->phy.ops.set_page(hw,
4521 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4522 if (ret_val)
4523 goto release;
4524 }
4526 /* Single Collision Count */
4527 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4528 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4529 if (!ret_val)
4530 adapter->stats.scc += phy_data;
4532 /* Excessive Collision Count */
4533 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4534 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4535 if (!ret_val)
4536 adapter->stats.ecol += phy_data;
4538 /* Multiple Collision Count */
4539 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4540 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4541 if (!ret_val)
4542 adapter->stats.mcc += phy_data;
4544 /* Late Collision Count */
4545 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4546 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4547 if (!ret_val)
4548 adapter->stats.latecol += phy_data;
4550 /* Collision Count - also used for adaptive IFS */
4551 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4552 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4553 if (!ret_val)
4554 hw->mac.collision_delta = phy_data;
4556 /* Defer Count */
4557 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4558 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4559 if (!ret_val)
4560 adapter->stats.dc += phy_data;
4562 /* Transmit with no CRS */
4563 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4564 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4565 if (!ret_val)
4566 adapter->stats.tncrs += phy_data;
4568 release:
4569 hw->phy.ops.release(hw);
4570 }
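/* Each counter above lives in an upper/lower PHY register pair: the
 * UPPER read latches and clears the hardware counter and is discarded,
 * while the LOWER read returns the 16-bit value that is accumulated
 * into the matching e1000_adapter stats field.
 */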
4573 * e1000e_update_stats - Update the board statistics counters
4574 * @adapter: board private structure
4576 static void e1000e_update_stats(struct e1000_adapter *adapter)
4578 struct net_device *netdev = adapter->netdev;
4579 struct e1000_hw *hw = &adapter->hw;
4580 struct pci_dev *pdev = adapter->pdev;
4582 /* Prevent stats update while adapter is being reset, or if the pci
4583 * connection is down.
4585 if (adapter->link_speed == 0)
4586 return;
4587 if (pci_channel_offline(pdev))
4588 return;
4590 adapter->stats.crcerrs += er32(CRCERRS);
4591 adapter->stats.gprc += er32(GPRC);
4592 adapter->stats.gorc += er32(GORCL);
4593 er32(GORCH); /* Clear gorc */
4594 adapter->stats.bprc += er32(BPRC);
4595 adapter->stats.mprc += er32(MPRC);
4596 adapter->stats.roc += er32(ROC);
4598 adapter->stats.mpc += er32(MPC);
4600 /* Half-duplex statistics */
4601 if (adapter->link_duplex == HALF_DUPLEX) {
4602 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4603 e1000e_update_phy_stats(adapter);
4604 } else {
4605 adapter->stats.scc += er32(SCC);
4606 adapter->stats.ecol += er32(ECOL);
4607 adapter->stats.mcc += er32(MCC);
4608 adapter->stats.latecol += er32(LATECOL);
4609 adapter->stats.dc += er32(DC);
4610 }
4611 hw->mac.collision_delta = er32(COLC);
4613 if ((hw->mac.type != e1000_82574) &&
4614 (hw->mac.type != e1000_82583))
4615 adapter->stats.tncrs += er32(TNCRS);
4616 }
4617 adapter->stats.colc += hw->mac.collision_delta;
4618 }
4620 adapter->stats.xonrxc += er32(XONRXC);
4621 adapter->stats.xontxc += er32(XONTXC);
4622 adapter->stats.xoffrxc += er32(XOFFRXC);
4623 adapter->stats.xofftxc += er32(XOFFTXC);
4624 adapter->stats.gptc += er32(GPTC);
4625 adapter->stats.gotc += er32(GOTCL);
4626 er32(GOTCH); /* Clear gotc */
4627 adapter->stats.rnbc += er32(RNBC);
4628 adapter->stats.ruc += er32(RUC);
4630 adapter->stats.mptc += er32(MPTC);
4631 adapter->stats.bptc += er32(BPTC);
4633 /* used for adaptive IFS */
4635 hw->mac.tx_packet_delta = er32(TPT);
4636 adapter->stats.tpt += hw->mac.tx_packet_delta;
4638 adapter->stats.algnerrc += er32(ALGNERRC);
4639 adapter->stats.rxerrc += er32(RXERRC);
4640 adapter->stats.cexterr += er32(CEXTERR);
4641 adapter->stats.tsctc += er32(TSCTC);
4642 adapter->stats.tsctfc += er32(TSCTFC);
4644 /* Fill out the OS statistics structure */
4645 netdev->stats.multicast = adapter->stats.mprc;
4646 netdev->stats.collisions = adapter->stats.colc;
4650 /* RLEC on some newer hardware can be incorrect so build
4651 * our own version based on RUC and ROC
4653 netdev->stats.rx_errors = adapter->stats.rxerrc +
4654 adapter->stats.crcerrs + adapter->stats.algnerrc +
4655 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4656 netdev->stats.rx_length_errors = adapter->stats.ruc +
4657 adapter->stats.roc;
4658 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4659 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4660 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4663 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4664 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4665 netdev->stats.tx_window_errors = adapter->stats.latecol;
4666 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4668 /* Tx Dropped needs to be maintained elsewhere */
4670 /* Management Stats */
4671 adapter->stats.mgptc += er32(MGTPTC);
4672 adapter->stats.mgprc += er32(MGTPRC);
4673 adapter->stats.mgpdc += er32(MGTPDC);
4675 /* Correctable ECC Errors */
4676 if (hw->mac.type == e1000_pch_lpt) {
4677 u32 pbeccsts = er32(PBECCSTS);
4678 adapter->corr_errors +=
4679 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4680 adapter->uncorr_errors +=
4681 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4682 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4683 }
4684 }
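/* Worked decode (illustrative value; assumes the 8-bit count fields the
 * mask/shift names suggest): a PBECCSTS read of 0x0302 would add 2 to
 * corr_errors (low byte) and 3 to uncorr_errors (bits 15:8).
 */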
4687 * e1000_phy_read_status - Update the PHY register status snapshot
4688 * @adapter: board private structure
4690 static void e1000_phy_read_status(struct e1000_adapter *adapter)
4692 struct e1000_hw *hw = &adapter->hw;
4693 struct e1000_phy_regs *phy = &adapter->phy_regs;
4695 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4696 (er32(STATUS) & E1000_STATUS_LU) &&
4697 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4700 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4701 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4702 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4703 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4704 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4705 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4706 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4707 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4708 if (ret_val)
4709 e_warn("Error reading PHY register\n");
4710 } else {
4711 /* Do not read PHY registers if link is not up.
4712 * Set values to typical power-on defaults.
4713 */
4714 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4715 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4716 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4717 BMSR_ERCAP);
4718 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4719 ADVERTISE_ALL | ADVERTISE_CSMA);
4720 phy->lpa = 0;
4721 phy->expansion = EXPANSION_ENABLENPAGE;
4722 phy->ctrl1000 = ADVERTISE_1000FULL;
4723 phy->stat1000 = 0;
4724 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4725 }
4726 }
4728 static void e1000_print_link_info(struct e1000_adapter *adapter)
4730 struct e1000_hw *hw = &adapter->hw;
4731 u32 ctrl = er32(CTRL);
4733 /* Link status message must follow this format for user tools */
4734 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4735 adapter->netdev->name, adapter->link_speed,
4736 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4737 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4738 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4739 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4740 }
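/* Example of the fixed format consumed by user tools (interface name
 * assumed):
 *   "eth0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: Rx/Tx"
 */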
4742 static bool e1000e_has_link(struct e1000_adapter *adapter)
4744 struct e1000_hw *hw = &adapter->hw;
4745 bool link_active = false;
4748 /* get_link_status is set on LSC (link status) interrupt or
4749 * Rx sequence error interrupt. It stays set until
4750 * check_for_link establishes link (for copper adapters
4751 * ONLY), so link_active reports false until then.
4752 */
4753 switch (hw->phy.media_type) {
4754 case e1000_media_type_copper:
4755 if (hw->mac.get_link_status) {
4756 ret_val = hw->mac.ops.check_for_link(hw);
4757 link_active = !hw->mac.get_link_status;
4758 } else {
4759 link_active = true;
4760 }
4761 break;
4762 case e1000_media_type_fiber:
4763 ret_val = hw->mac.ops.check_for_link(hw);
4764 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4766 case e1000_media_type_internal_serdes:
4767 ret_val = hw->mac.ops.check_for_link(hw);
4768 link_active = adapter->hw.mac.serdes_has_link;
4771 case e1000_media_type_unknown:
4775 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4776 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4777 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4778 e_info("Gigabit has been disabled, downgrading speed\n");
4784 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4786 /* make sure the receive unit is started */
4787 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4788 (adapter->flags & FLAG_RESTART_NOW)) {
4789 struct e1000_hw *hw = &adapter->hw;
4790 u32 rctl = er32(RCTL);
4791 ew32(RCTL, rctl | E1000_RCTL_EN);
4792 adapter->flags &= ~FLAG_RESTART_NOW;
4796 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4798 struct e1000_hw *hw = &adapter->hw;
4800 /* With 82574 controllers, the PHY needs to be checked periodically
4801 * for a hung state, and reset if two successive checks report a hang.
4802 */
4803 if (e1000_check_phy_82574(hw))
4804 adapter->phy_hang_count++;
4805 else
4806 adapter->phy_hang_count = 0;
4808 if (adapter->phy_hang_count > 1) {
4809 adapter->phy_hang_count = 0;
4810 schedule_work(&adapter->reset_task);
4815 * e1000_watchdog - Timer Call-back
4816 * @data: pointer to adapter cast into an unsigned long
4818 static void e1000_watchdog(unsigned long data)
4820 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4822 /* Do the rest outside of interrupt context */
4823 schedule_work(&adapter->watchdog_task);
4825 /* TODO: make this use queue_delayed_work() */
4828 static void e1000_watchdog_task(struct work_struct *work)
4830 struct e1000_adapter *adapter = container_of(work,
4831 struct e1000_adapter,
4832 watchdog_task);
4833 struct net_device *netdev = adapter->netdev;
4834 struct e1000_mac_info *mac = &adapter->hw.mac;
4835 struct e1000_phy_info *phy = &adapter->hw.phy;
4836 struct e1000_ring *tx_ring = adapter->tx_ring;
4837 struct e1000_hw *hw = &adapter->hw;
4838 u32 link, tctl;
4840 if (test_bit(__E1000_DOWN, &adapter->state))
4841 return;
4843 link = e1000e_has_link(adapter);
4844 if ((netif_carrier_ok(netdev)) && link) {
4845 /* Cancel scheduled suspend requests. */
4846 pm_runtime_resume(netdev->dev.parent);
4848 e1000e_enable_receives(adapter);
4849 goto link_up;
4850 }
4852 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4853 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4854 e1000_update_mng_vlan(adapter);
4856 if (link) {
4857 if (!netif_carrier_ok(netdev)) {
4858 bool txb2b = true;
4860 /* Cancel scheduled suspend requests. */
4861 pm_runtime_resume(netdev->dev.parent);
4863 /* update snapshot of PHY registers on LSC */
4864 e1000_phy_read_status(adapter);
4865 mac->ops.get_link_up_info(&adapter->hw,
4866 &adapter->link_speed,
4867 &adapter->link_duplex);
4868 e1000_print_link_info(adapter);
4870 /* check if SmartSpeed worked */
4871 e1000e_check_downshift(hw);
4872 if (phy->speed_downgraded)
4873 netdev_warn(netdev,
4874 "Link Speed was downgraded by SmartSpeed\n");
4876 /* On supported PHYs, check for duplex mismatch only
4877 * if link has autonegotiated at 10/100 half
4879 if ((hw->phy.type == e1000_phy_igp_3 ||
4880 hw->phy.type == e1000_phy_bm) &&
4882 (adapter->link_speed == SPEED_10 ||
4883 adapter->link_speed == SPEED_100) &&
4884 (adapter->link_duplex == HALF_DUPLEX)) {
4885 u16 autoneg_exp;
4887 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
4889 if (!(autoneg_exp & EXPANSION_NWAY))
4890 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4893 /* adjust timeout factor according to speed/duplex */
4894 adapter->tx_timeout_factor = 1;
4895 switch (adapter->link_speed) {
4896 case SPEED_10:
4897 txb2b = false;
4898 adapter->tx_timeout_factor = 16;
4899 break;
4900 case SPEED_100:
4901 txb2b = false;
4902 adapter->tx_timeout_factor = 10;
4903 break;
4904 }
4906 /* workaround: re-program speed mode bit after
4907 * link-up event
4908 */
4909 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4910 !txb2b) {
4911 u32 tarc0;
4912 tarc0 = er32(TARC(0));
4913 tarc0 &= ~SPEED_MODE_BIT;
4914 ew32(TARC(0), tarc0);
4915 }
4917 /* disable TSO for pcie and 10/100 speeds, to avoid
4918 * some hardware issues
4919 */
4920 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4921 switch (adapter->link_speed) {
4922 case SPEED_10:
4923 case SPEED_100:
4924 e_info("10/100 speed: disabling TSO\n")
4925 netdev->features &= ~NETIF_F_TSO;
4926 netdev->features &= ~NETIF_F_TSO6;
4927 break;
4928 default:
4929 netdev->features |= NETIF_F_TSO;
4930 netdev->features |= NETIF_F_TSO6;
4931 break;
4932 }
4933 }
4938 /* enable transmits in the hardware, need to do this
4939 * after setting TARC(0)
4940 */
4941 tctl = er32(TCTL);
4942 tctl |= E1000_TCTL_EN;
4943 ew32(TCTL, tctl);
4945 /* Perform any post-link-up configuration before
4946 * reporting link up.
4948 if (phy->ops.cfg_on_link_up)
4949 phy->ops.cfg_on_link_up(hw);
4951 netif_carrier_on(netdev);
4953 if (!test_bit(__E1000_DOWN, &adapter->state))
4954 mod_timer(&adapter->phy_info_timer,
4955 round_jiffies(jiffies + 2 * HZ));
4956 }
4957 } else {
4958 if (netif_carrier_ok(netdev)) {
4959 adapter->link_speed = 0;
4960 adapter->link_duplex = 0;
4961 /* Link status message must follow this format */
4962 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
4963 netif_carrier_off(netdev);
4964 if (!test_bit(__E1000_DOWN, &adapter->state))
4965 mod_timer(&adapter->phy_info_timer,
4966 round_jiffies(jiffies + 2 * HZ));
4968 /* The link is lost so the controller stops DMA.
4969 * If there is queued Tx work that cannot be done
4970 * or if on an 80003ES2LAN which requires a Rx packet
4971 * buffer work-around on link down event, reset the
4972 * controller to flush the Tx/Rx packet buffers.
4973 * (Do the reset outside of interrupt context).
4974 */
4975 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
4976 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
4977 adapter->flags |= FLAG_RESTART_NOW;
4978 } else {
4979 pm_schedule_suspend(netdev->dev.parent,
4980 LINK_TIMEOUT);
4981 }
4982 }
4984 link_up:
4985 spin_lock(&adapter->stats64_lock);
4986 e1000e_update_stats(adapter);
4988 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4989 adapter->tpt_old = adapter->stats.tpt;
4990 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4991 adapter->colc_old = adapter->stats.colc;
4993 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4994 adapter->gorc_old = adapter->stats.gorc;
4995 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4996 adapter->gotc_old = adapter->stats.gotc;
4997 spin_unlock(&adapter->stats64_lock);
4999 if (adapter->flags & FLAG_RESTART_NOW) {
5000 schedule_work(&adapter->reset_task);
5001 /* return immediately since reset is imminent */
5002 return;
5003 }
5005 e1000e_update_adaptive(&adapter->hw);
5007 /* Simple mode for Interrupt Throttle Rate (ITR) */
5008 if (adapter->itr_setting == 4) {
5009 /* Symmetric Tx/Rx gets a reduced ITR=2000;
5010 * Total asymmetrical Tx or Rx gets ITR=8000;
5011 * everyone else is between 2000-8000.
5013 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5014 u32 dif = (adapter->gotc > adapter->gorc ?
5015 adapter->gotc - adapter->gorc :
5016 adapter->gorc - adapter->gotc) / 10000;
5017 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5019 e1000e_write_itr(adapter, itr);
5020 }
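/* Worked example (illustrative numbers): with gotc = 60000 and
 * gorc = 20000 bytes in the last interval, goc = 80000 / 10000 = 8 and
 * dif = 40000 / 10000 = 4, so itr = 4 * 6000 / 8 + 2000 = 5000 --
 * between the symmetric floor of 2000 and the fully-asymmetric 8000.
 */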
5022 /* Cause software interrupt to ensure Rx ring is cleaned */
5023 if (adapter->msix_entries)
5024 ew32(ICS, adapter->rx_ring->ims_val);
5025 else
5026 ew32(ICS, E1000_ICS_RXDMT0);
5028 /* flush pending descriptors to memory before detecting Tx hang */
5029 e1000e_flush_descriptors(adapter);
5031 /* Force detection of hung controller every watchdog period */
5032 adapter->detect_tx_hung = true;
5034 /* With 82571 controllers, LAA may be overwritten due to controller
5035 * reset from the other port. Set the appropriate LAA in RAR[0]
5037 if (e1000e_get_laa_state_82571(hw))
5038 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5040 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5041 e1000e_check_82574_phy_workaround(adapter);
5043 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5044 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5045 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5046 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5047 er32(RXSTMPH);
5048 adapter->rx_hwtstamp_cleared++;
5049 } else {
5050 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5051 }
5052 }
5054 /* Reset the timer */
5055 if (!test_bit(__E1000_DOWN, &adapter->state))
5056 mod_timer(&adapter->watchdog_timer,
5057 round_jiffies(jiffies + 2 * HZ));
5060 #define E1000_TX_FLAGS_CSUM 0x00000001
5061 #define E1000_TX_FLAGS_VLAN 0x00000002
5062 #define E1000_TX_FLAGS_TSO 0x00000004
5063 #define E1000_TX_FLAGS_IPV4 0x00000008
5064 #define E1000_TX_FLAGS_NO_FCS 0x00000010
5065 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5066 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5067 #define E1000_TX_FLAGS_VLAN_SHIFT 16
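/* The low bits of tx_flags are booleans and the top 16 bits carry the
 * 802.1Q tag. Illustrative packing (tag value assumed): a TSO frame
 * with VLAN ID 100, priority 3 (tag 0x6064) is encoded as
 *   tx_flags = (0x6064 << E1000_TX_FLAGS_VLAN_SHIFT)
 *            | E1000_TX_FLAGS_VLAN | E1000_TX_FLAGS_TSO
 *            = 0x60640006.
 */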
5069 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5071 struct e1000_context_desc *context_desc;
5072 struct e1000_buffer *buffer_info;
5073 unsigned int i;
5074 u32 cmd_length = 0;
5075 u16 ipcse = 0, tucse, mss;
5076 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5078 if (!skb_is_gso(skb))
5079 return 0;
5081 if (skb_header_cloned(skb)) {
5082 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5084 if (err)
5085 return err;
5086 }
5088 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5089 mss = skb_shinfo(skb)->gso_size;
5090 if (skb->protocol == htons(ETH_P_IP)) {
5091 struct iphdr *iph = ip_hdr(skb);
5092 iph->tot_len = 0;
5093 iph->check = 0;
5094 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5095 0, IPPROTO_TCP, 0);
5096 cmd_length = E1000_TXD_CMD_IP;
5097 ipcse = skb_transport_offset(skb) - 1;
5098 } else if (skb_is_gso_v6(skb)) {
5099 ipv6_hdr(skb)->payload_len = 0;
5100 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5101 &ipv6_hdr(skb)->daddr,
5102 0, IPPROTO_TCP, 0);
5103 }
5105 ipcss = skb_network_offset(skb);
5106 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5107 tucss = skb_transport_offset(skb);
5108 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5110 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5111 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5113 i = tx_ring->next_to_use;
5114 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5115 buffer_info = &tx_ring->buffer_info[i];
5117 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5118 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5119 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5120 context_desc->upper_setup.tcp_fields.tucss = tucss;
5121 context_desc->upper_setup.tcp_fields.tucso = tucso;
5122 context_desc->upper_setup.tcp_fields.tucse = 0;
5123 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5124 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5125 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5127 buffer_info->time_stamp = jiffies;
5128 buffer_info->next_to_watch = i;
5130 i++;
5131 if (i == tx_ring->count)
5132 i = 0;
5133 tx_ring->next_to_use = i;
5135 return 1;
5136 }
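/* Worked offsets for a plain Ethernet + IPv4 + TCP frame (20-byte IP
 * and TCP headers; illustrative, not from the source): ipcss = 14,
 * ipcso = 14 + 10 = 24 (IP checksum field), ipcse = 33, tucss = 34,
 * tucso = 34 + 16 = 50 (TCP checksum field) and hdr_len = 54, so
 * cmd_length carries skb->len - 54 bytes of TSO payload.
 */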
5138 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5140 struct e1000_adapter *adapter = tx_ring->adapter;
5141 struct e1000_context_desc *context_desc;
5142 struct e1000_buffer *buffer_info;
5143 unsigned int i;
5144 u8 css;
5145 u32 cmd_len = E1000_TXD_CMD_DEXT;
5146 __be16 protocol;
5148 if (skb->ip_summed != CHECKSUM_PARTIAL)
5149 return false;
5151 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5152 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
5154 protocol = skb->protocol;
5156 switch (protocol) {
5157 case cpu_to_be16(ETH_P_IP):
5158 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5159 cmd_len |= E1000_TXD_CMD_TCP;
5160 break;
5161 case cpu_to_be16(ETH_P_IPV6):
5162 /* XXX not handling all IPV6 headers */
5163 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5164 cmd_len |= E1000_TXD_CMD_TCP;
5165 break;
5166 default:
5167 if (unlikely(net_ratelimit()))
5168 e_warn("checksum_partial proto=%x!\n",
5169 be16_to_cpu(protocol));
5170 break;
5171 }
5173 css = skb_checksum_start_offset(skb);
5175 i = tx_ring->next_to_use;
5176 buffer_info = &tx_ring->buffer_info[i];
5177 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5179 context_desc->lower_setup.ip_config = 0;
5180 context_desc->upper_setup.tcp_fields.tucss = css;
5181 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5182 context_desc->upper_setup.tcp_fields.tucse = 0;
5183 context_desc->tcp_seg_setup.data = 0;
5184 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5186 buffer_info->time_stamp = jiffies;
5187 buffer_info->next_to_watch = i;
5189 i++;
5190 if (i == tx_ring->count)
5191 i = 0;
5192 tx_ring->next_to_use = i;
5194 return true;
5195 }
5197 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5198 unsigned int first, unsigned int max_per_txd,
5199 unsigned int nr_frags)
5201 struct e1000_adapter *adapter = tx_ring->adapter;
5202 struct pci_dev *pdev = adapter->pdev;
5203 struct e1000_buffer *buffer_info;
5204 unsigned int len = skb_headlen(skb);
5205 unsigned int offset = 0, size, count = 0, i;
5206 unsigned int f, bytecount, segs;
5208 i = tx_ring->next_to_use;
5210 while (len) {
5211 buffer_info = &tx_ring->buffer_info[i];
5212 size = min(len, max_per_txd);
5214 buffer_info->length = size;
5215 buffer_info->time_stamp = jiffies;
5216 buffer_info->next_to_watch = i;
5217 buffer_info->dma = dma_map_single(&pdev->dev,
5218 skb->data + offset,
5219 size, DMA_TO_DEVICE);
5220 buffer_info->mapped_as_page = false;
5221 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5222 goto dma_error;
5224 len -= size;
5225 offset += size;
5226 count++;
5228 if (len) {
5229 i++;
5230 if (i == tx_ring->count)
5231 i = 0;
5232 }
5233 }
5235 for (f = 0; f < nr_frags; f++) {
5236 const struct skb_frag_struct *frag;
5238 frag = &skb_shinfo(skb)->frags[f];
5239 len = skb_frag_size(frag);
5240 offset = 0;
5242 while (len) {
5243 i++;
5244 if (i == tx_ring->count)
5245 i = 0;
5247 buffer_info = &tx_ring->buffer_info[i];
5248 size = min(len, max_per_txd);
5250 buffer_info->length = size;
5251 buffer_info->time_stamp = jiffies;
5252 buffer_info->next_to_watch = i;
5253 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5254 offset, size,
5255 DMA_TO_DEVICE);
5256 buffer_info->mapped_as_page = true;
5257 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5258 goto dma_error;
5260 len -= size;
5261 offset += size;
5262 count++;
5263 }
5264 }
5266 segs = skb_shinfo(skb)->gso_segs ? : 1;
5267 /* multiply data chunks by size of headers */
5268 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5270 tx_ring->buffer_info[i].skb = skb;
5271 tx_ring->buffer_info[i].segs = segs;
5272 tx_ring->buffer_info[i].bytecount = bytecount;
5273 tx_ring->buffer_info[first].next_to_watch = i;
5275 return count;
5277 dma_error:
5278 dev_err(&pdev->dev, "Tx DMA map failed\n");
5279 buffer_info->dma = 0;
5280 if (count)
5281 count--;
5283 while (count--) {
5284 if (i == 0)
5285 i += tx_ring->count;
5286 i--;
5287 buffer_info = &tx_ring->buffer_info[i];
5288 e1000_put_txbuf(tx_ring, buffer_info);
5289 }
5291 return 0;
5292 }
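/* bytecount example (numbers assumed): for a TSO skb with gso_segs = 6
 * and a 54-byte linear header, bytecount = (6 - 1) * 54 + skb->len,
 * counting the replicated header once per wire segment so the byte
 * statistics match what actually leaves the MAC.
 */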
5294 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5296 struct e1000_adapter *adapter = tx_ring->adapter;
5297 struct e1000_tx_desc *tx_desc = NULL;
5298 struct e1000_buffer *buffer_info;
5299 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5302 if (tx_flags & E1000_TX_FLAGS_TSO) {
5303 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5304 E1000_TXD_CMD_TSE;
5305 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5307 if (tx_flags & E1000_TX_FLAGS_IPV4)
5308 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5311 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5312 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5313 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5316 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5317 txd_lower |= E1000_TXD_CMD_VLE;
5318 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5321 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5322 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5324 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5325 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5326 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5329 i = tx_ring->next_to_use;
5331 do {
5332 buffer_info = &tx_ring->buffer_info[i];
5333 tx_desc = E1000_TX_DESC(*tx_ring, i);
5334 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5335 tx_desc->lower.data = cpu_to_le32(txd_lower |
5336 buffer_info->length);
5337 tx_desc->upper.data = cpu_to_le32(txd_upper);
5339 i++;
5340 if (i == tx_ring->count)
5341 i = 0;
5342 } while (--count > 0);
5344 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5346 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5347 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5348 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5350 /* Force memory writes to complete before letting h/w
5351 * know there are new descriptors to fetch. (Only
5352 * applicable for weak-ordered memory model archs,
5353 * such as IA-64).
5354 */
5355 wmb();
5357 tx_ring->next_to_use = i;
5359 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5360 e1000e_update_tdt_wa(tx_ring, i);
5361 else
5362 writel(i, tx_ring->tail);
5364 /* we need this if more than one processor can write to our tail
5365 * at a time; it synchronizes IO on IA64/Altix systems
5366 */
5367 mmiowb();
5368 }
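/* Ordering contract, in sketch form: write descriptors, wmb() to make
 * them globally visible, then write the tail register so the MAC
 * fetches them; mmiowb() extends the guarantee to posted MMIO writes on
 * platforms (IA64/Altix) where another CPU may write the tail next.
 */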
5370 #define MINIMUM_DHCP_PACKET_SIZE 282
5371 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5372 struct sk_buff *skb)
5374 struct e1000_hw *hw = &adapter->hw;
5375 u16 length, offset;
5377 if (vlan_tx_tag_present(skb) &&
5378 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5379 (adapter->hw.mng_cookie.status &
5380 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5381 return 0;
5383 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5384 return 0;
5386 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5387 return 0;
5389 {
5390 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5391 struct udphdr *udp;
5393 if (ip->protocol != IPPROTO_UDP)
5394 return 0;
5396 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5397 if (ntohs(udp->dest) != 67)
5398 return 0;
5400 offset = (u8 *)udp + 8 - skb->data;
5401 length = skb->len - offset;
5402 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5403 }
5405 return 0;
5406 }
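/* Offset math for a minimal frame (assuming ihl == 5): UDP starts at
 * 14 (Ethernet) + 20 (IP) = 34, so (u8 *)udp + 8 - skb->data = 42 and
 * the DHCP payload passed to the firmware starts at byte 42 with
 * length skb->len - 42.
 */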
5408 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5410 struct e1000_adapter *adapter = tx_ring->adapter;
5412 netif_stop_queue(adapter->netdev);
5413 /* Herbert's original patch had:
5414 * smp_mb__after_netif_stop_queue();
5415 * but since that doesn't exist yet, just open code it.
5416 */
5417 smp_mb();
5419 /* We need to check again in a case another CPU has just
5420 * made room available.
5421 */
5422 if (e1000_desc_unused(tx_ring) < size)
5423 return -EBUSY;
5425 /* A reprieve! */
5426 netif_start_queue(adapter->netdev);
5427 ++adapter->restart_queue;
5428 return 0;
5429 }
5431 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5432 {
5433 BUG_ON(size > tx_ring->count);
5435 if (e1000_desc_unused(tx_ring) >= size)
5436 return 0;
5437 return __e1000_maybe_stop_tx(tx_ring, size);
5438 }
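/* The smp_mb() above closes the classic stop/wake race, sketched here:
 *
 *   CPU0 (xmit)                      CPU1 (clean_tx_irq)
 *   netif_stop_queue()
 *                                    frees descriptors,
 *                                    wakes queue if stopped
 *   re-reads e1000_desc_unused()
 *
 * Without the barrier the re-read could see a stale count and leave the
 * queue stopped with no one left to wake it.
 */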
5440 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5441 struct net_device *netdev)
5443 struct e1000_adapter *adapter = netdev_priv(netdev);
5444 struct e1000_ring *tx_ring = adapter->tx_ring;
5446 unsigned int tx_flags = 0;
5447 unsigned int len = skb_headlen(skb);
5448 unsigned int nr_frags;
5449 unsigned int mss;
5450 int count = 0;
5451 int tso;
5452 unsigned int f;
5454 if (test_bit(__E1000_DOWN, &adapter->state)) {
5455 dev_kfree_skb_any(skb);
5456 return NETDEV_TX_OK;
5459 if (skb->len <= 0) {
5460 dev_kfree_skb_any(skb);
5461 return NETDEV_TX_OK;
5464 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5465 * pad skb in order to meet this minimum size requirement
5467 if (unlikely(skb->len < 17)) {
5468 if (skb_pad(skb, 17 - skb->len))
5469 return NETDEV_TX_OK;
5470 skb->len = 17;
5471 skb_set_tail_pointer(skb, 17);
5472 }
5474 mss = skb_shinfo(skb)->gso_size;
5475 if (mss) {
5476 u8 hdr_len;
5478 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5479 * points to just header, pull a few bytes of payload from
5480 * frags into skb->data
5482 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5483 /* we do this workaround for ES2LAN, but it is unnecessary;
5484 * avoiding it could save a lot of cycles
5485 */
5486 if (skb->data_len && (hdr_len == len)) {
5487 unsigned int pull_size;
5489 pull_size = min_t(unsigned int, 4, skb->data_len);
5490 if (!__pskb_pull_tail(skb, pull_size)) {
5491 e_err("__pskb_pull_tail failed.\n");
5492 dev_kfree_skb_any(skb);
5493 return NETDEV_TX_OK;
5495 len = skb_headlen(skb);
5496 }
5497 }
5499 /* reserve a descriptor for the offload context */
5500 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5501 count++;
5502 count++;
5504 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5506 nr_frags = skb_shinfo(skb)->nr_frags;
5507 for (f = 0; f < nr_frags; f++)
5508 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5509 adapter->tx_fifo_limit);
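/* Descriptor-count example (a tx_fifo_limit of 4096 is an assumed
 * value): a frame with a 1514-byte linear area and one 9000-byte frag
 * needs DIV_ROUND_UP(1514, 4096) = 1 plus DIV_ROUND_UP(9000, 4096) = 3
 * data descriptors, plus the context descriptor reserved above when
 * TSO or checksum offload is in use.
 */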
5511 if (adapter->hw.mac.tx_pkt_filtering)
5512 e1000_transfer_dhcp_info(adapter, skb);
5514 /* need: count + 2 desc gap to keep tail from touching
5515 * head, otherwise try next time
5517 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5518 return NETDEV_TX_BUSY;
5520 if (vlan_tx_tag_present(skb)) {
5521 tx_flags |= E1000_TX_FLAGS_VLAN;
5522 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5525 first = tx_ring->next_to_use;
5527 tso = e1000_tso(tx_ring, skb);
5528 if (tso < 0) {
5529 dev_kfree_skb_any(skb);
5530 return NETDEV_TX_OK;
5531 }
5533 if (tso)
5534 tx_flags |= E1000_TX_FLAGS_TSO;
5535 else if (e1000_tx_csum(tx_ring, skb))
5536 tx_flags |= E1000_TX_FLAGS_CSUM;
5538 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5539 * 82571 hardware supports TSO capabilities for IPv6 as well...
5540 * we can no longer assume, we must check.
5541 */
5542 if (skb->protocol == htons(ETH_P_IP))
5543 tx_flags |= E1000_TX_FLAGS_IPV4;
5545 if (unlikely(skb->no_fcs))
5546 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5548 /* if count is 0 then mapping error has occurred */
5549 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5550 nr_frags);
5551 if (count) {
5552 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5553 !adapter->tx_hwtstamp_skb)) {
5554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5555 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5556 adapter->tx_hwtstamp_skb = skb_get(skb);
5557 schedule_work(&adapter->tx_hwtstamp_work);
5558 } else {
5559 skb_tx_timestamp(skb);
5560 }
5562 netdev_sent_queue(netdev, skb->len);
5563 e1000_tx_queue(tx_ring, tx_flags, count);
5564 /* Make sure there is space in the ring for the next send. */
5565 e1000_maybe_stop_tx(tx_ring,
5566 (MAX_SKB_FRAGS *
5567 DIV_ROUND_UP(PAGE_SIZE,
5568 adapter->tx_fifo_limit) + 2));
5569 } else {
5570 dev_kfree_skb_any(skb);
5571 tx_ring->buffer_info[first].time_stamp = 0;
5572 tx_ring->next_to_use = first;
5573 }
5575 return NETDEV_TX_OK;
5576 }
5579 * e1000_tx_timeout - Respond to a Tx Hang
5580 * @netdev: network interface device structure
5582 static void e1000_tx_timeout(struct net_device *netdev)
5584 struct e1000_adapter *adapter = netdev_priv(netdev);
5586 /* Do the reset outside of interrupt context */
5587 adapter->tx_timeout_count++;
5588 schedule_work(&adapter->reset_task);
5591 static void e1000_reset_task(struct work_struct *work)
5593 struct e1000_adapter *adapter;
5594 adapter = container_of(work, struct e1000_adapter, reset_task);
5596 /* don't run the task if already down */
5597 if (test_bit(__E1000_DOWN, &adapter->state))
5600 if (!(adapter->flags & FLAG_RESTART_NOW)) {
5601 e1000e_dump(adapter);
5602 e_err("Reset adapter unexpectedly\n");
5604 e1000e_reinit_locked(adapter);
5608 * e1000_get_stats64 - Get System Network Statistics
5609 * @netdev: network interface device structure
5610 * @stats: rtnl_link_stats64 pointer
5612 * Returns the address of the device statistics structure.
5614 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5615 struct rtnl_link_stats64 *stats)
5617 struct e1000_adapter *adapter = netdev_priv(netdev);
5619 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5620 spin_lock(&adapter->stats64_lock);
5621 e1000e_update_stats(adapter);
5622 /* Fill out the OS statistics structure */
5623 stats->rx_bytes = adapter->stats.gorc;
5624 stats->rx_packets = adapter->stats.gprc;
5625 stats->tx_bytes = adapter->stats.gotc;
5626 stats->tx_packets = adapter->stats.gptc;
5627 stats->multicast = adapter->stats.mprc;
5628 stats->collisions = adapter->stats.colc;
5632 /* RLEC on some newer hardware can be incorrect so build
5633 * our own version based on RUC and ROC
5635 stats->rx_errors = adapter->stats.rxerrc +
5636 adapter->stats.crcerrs + adapter->stats.algnerrc +
5637 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5638 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5639 stats->rx_crc_errors = adapter->stats.crcerrs;
5640 stats->rx_frame_errors = adapter->stats.algnerrc;
5641 stats->rx_missed_errors = adapter->stats.mpc;
5644 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5645 stats->tx_aborted_errors = adapter->stats.ecol;
5646 stats->tx_window_errors = adapter->stats.latecol;
5647 stats->tx_carrier_errors = adapter->stats.tncrs;
5649 /* Tx Dropped needs to be maintained elsewhere */
5651 spin_unlock(&adapter->stats64_lock);
5652 return stats;
5653 }
5656 * e1000_change_mtu - Change the Maximum Transfer Unit
5657 * @netdev: network interface device structure
5658 * @new_mtu: new value for maximum frame size
5660 * Returns 0 on success, negative on failure
5662 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5664 struct e1000_adapter *adapter = netdev_priv(netdev);
5665 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5667 /* Jumbo frame support */
5668 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5669 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5670 e_err("Jumbo Frames not supported.\n");
5674 /* Supported frame sizes */
5675 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5676 (max_frame > adapter->max_hw_frame_size)) {
5677 e_err("Unsupported MTU setting\n");
5681 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5682 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5683 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5684 (new_mtu > ETH_DATA_LEN)) {
5685 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5689 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5690 usleep_range(1000, 2000);
5691 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5692 adapter->max_frame_size = max_frame;
5693 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5694 netdev->mtu = new_mtu;
5695 if (netif_running(netdev))
5696 e1000e_down(adapter);
5698 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5699 * means we reserve 2 more, this pushes us to allocate from the next
5700 * larger slab size.
5701 * i.e. RXBUFFER_2048 --> size-4096 slab
5702 * However with the new *_jumbo_rx* routines, jumbo receives will use
5703 * fragmented skbs
5704 */
5706 if (max_frame <= 2048)
5707 adapter->rx_buffer_len = 2048;
5708 else
5709 adapter->rx_buffer_len = 4096;
5711 /* adjust allocation if LPE protects us, and we aren't using SBP */
5712 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5713 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5714 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5715 + ETH_FCS_LEN;
5717 if (netif_running(netdev))
5718 e1000e_up(adapter);
5719 else
5720 e1000e_reset(adapter);
5722 clear_bit(__E1000_RESETTING, &adapter->state);
5724 return 0;
5725 }
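/* max_frame examples: a standard 1500-byte MTU yields 1500 + 14
 * (Ethernet header) + 4 (FCS) = 1518 and the 2048-byte buffer; a
 * 9000-byte jumbo MTU yields 9018 and additionally requires
 * FLAG_HAS_JUMBO_FRAMES and a sufficient max_hw_frame_size.
 */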
5727 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5730 struct e1000_adapter *adapter = netdev_priv(netdev);
5731 struct mii_ioctl_data *data = if_mii(ifr);
5733 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5734 return -EOPNOTSUPP;
5736 switch (cmd) {
5737 case SIOCGMIIPHY:
5738 data->phy_id = adapter->hw.phy.addr;
5739 break;
5740 case SIOCGMIIREG:
5741 e1000_phy_read_status(adapter);
5743 switch (data->reg_num & 0x1F) {
5744 case MII_BMCR:
5745 data->val_out = adapter->phy_regs.bmcr;
5746 break;
5747 case MII_BMSR:
5748 data->val_out = adapter->phy_regs.bmsr;
5749 break;
5750 case MII_PHYSID1:
5751 data->val_out = (adapter->hw.phy.id >> 16);
5752 break;
5753 case MII_PHYSID2:
5754 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5755 break;
5756 case MII_ADVERTISE:
5757 data->val_out = adapter->phy_regs.advertise;
5758 break;
5759 case MII_LPA:
5760 data->val_out = adapter->phy_regs.lpa;
5761 break;
5762 case MII_EXPANSION:
5763 data->val_out = adapter->phy_regs.expansion;
5764 break;
5765 case MII_CTRL1000:
5766 data->val_out = adapter->phy_regs.ctrl1000;
5767 break;
5768 case MII_STAT1000:
5769 data->val_out = adapter->phy_regs.stat1000;
5770 break;
5771 case MII_ESTATUS:
5772 data->val_out = adapter->phy_regs.estatus;
5773 break;
5774 default:
5775 return -EIO;
5776 }
5777 break;
5778 case SIOCSMIIREG:
5779 default:
5780 return -EOPNOTSUPP;
5781 }
5782 return 0;
5783 }
5786 * e1000e_hwtstamp_ioctl - control hardware time stamping
5787 * @netdev: network interface device structure
5788 * @ifreq: interface request
5790 * Outgoing time stamping can be enabled and disabled. Play nice and
5791 * disable it when requested, although it shouldn't cause any overhead
5792 * when no packet needs it. At most one packet in the queue may be
5793 * marked for time stamping, otherwise it would be impossible to tell
5794 * for sure to which packet the hardware time stamp belongs.
5796 * Incoming time stamping has to be configured via the hardware filters.
5797 * Not all combinations are supported, in particular event type has to be
5798 * specified. Matching the kind of event packet is not supported, with the
5799 * exception of "all V2 events regardless of level 2 or 4".
5801 static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
5803 struct e1000_adapter *adapter = netdev_priv(netdev);
5804 struct hwtstamp_config config;
5807 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5808 return -EFAULT;
5810 ret_val = e1000e_config_hwtstamp(adapter, &config);
5811 if (ret_val)
5812 return ret_val;
5814 switch (config.rx_filter) {
5815 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5816 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5817 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5818 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5819 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5820 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5821 /* With V2 type filters which specify a Sync or Delay Request,
5822 * Path Delay Request/Response messages are also time stamped
5823 * by hardware so notify the caller the requested packets plus
5824 * some others are time stamped.
5826 config.rx_filter = HWTSTAMP_FILTER_SOME;
5827 break;
5828 default:
5829 break;
5830 }
5832 return copy_to_user(ifr->ifr_data, &config,
5833 sizeof(config)) ? -EFAULT : 0;
5834 }
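/* Illustrative user-space usage (a sketch; the interface name and
 * filter choice are assumptions, not part of this driver):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC,
 *   };
 *   struct ifreq ifr;
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);  // sock_fd: any AF_INET socket
 *
 * On return, cfg.rx_filter may have been upgraded (e.g. to
 * HWTSTAMP_FILTER_SOME above) to describe what is really timestamped.
 */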
5836 static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
5838 struct e1000_adapter *adapter = netdev_priv(netdev);
5840 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
5841 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
5844 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5845 {
5846 switch (cmd) {
5847 case SIOCGMIIPHY:
5848 case SIOCGMIIREG:
5849 case SIOCSMIIREG:
5850 return e1000_mii_ioctl(netdev, ifr, cmd);
5851 case SIOCSHWTSTAMP:
5852 return e1000e_hwtstamp_set(netdev, ifr);
5853 case SIOCGHWTSTAMP:
5854 return e1000e_hwtstamp_get(netdev, ifr);
5855 default:
5856 return -EOPNOTSUPP;
5857 }
5858 }
5860 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5862 struct e1000_hw *hw = &adapter->hw;
5864 u16 phy_reg, wuc_enable;
5867 /* copy MAC RARs to PHY RARs */
5868 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5870 retval = hw->phy.ops.acquire(hw);
5871 if (retval) {
5872 e_err("Could not acquire PHY\n");
5873 return retval;
5874 }
5876 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5877 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5881 /* copy MAC MTA to PHY MTA - only needed for pchlan */
5882 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5883 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5884 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5885 (u16)(mac_reg & 0xFFFF));
5886 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5887 (u16)((mac_reg >> 16) & 0xFFFF));
5890 /* configure PHY Rx Control register */
5891 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5892 mac_reg = er32(RCTL);
5893 if (mac_reg & E1000_RCTL_UPE)
5894 phy_reg |= BM_RCTL_UPE;
5895 if (mac_reg & E1000_RCTL_MPE)
5896 phy_reg |= BM_RCTL_MPE;
5897 phy_reg &= ~(BM_RCTL_MO_MASK);
5898 if (mac_reg & E1000_RCTL_MO_3)
5899 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5900 << BM_RCTL_MO_SHIFT);
5901 if (mac_reg & E1000_RCTL_BAM)
5902 phy_reg |= BM_RCTL_BAM;
5903 if (mac_reg & E1000_RCTL_PMCF)
5904 phy_reg |= BM_RCTL_PMCF;
5905 mac_reg = er32(CTRL);
5906 if (mac_reg & E1000_CTRL_RFCE)
5907 phy_reg |= BM_RCTL_RFCE;
5908 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5910 /* enable PHY wakeup in MAC register */
5912 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5914 /* configure and enable PHY wakeup in PHY registers */
5915 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5916 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5918 /* activate PHY wakeup */
5919 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5920 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5921 if (retval)
5922 e_err("Could not set PHY Host Wakeup bit\n");
5923 release:
5924 hw->phy.ops.release(hw);
5926 return retval;
5927 }
5929 static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5931 struct net_device *netdev = pci_get_drvdata(pdev);
5932 struct e1000_adapter *adapter = netdev_priv(netdev);
5933 struct e1000_hw *hw = &adapter->hw;
5934 u32 ctrl, ctrl_ext, rctl, status;
5935 /* Runtime suspend should only enable wakeup for link changes */
5936 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5939 netif_device_detach(netdev);
5941 if (netif_running(netdev)) {
5942 int count = E1000_CHECK_RESET_COUNT;
5944 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5945 usleep_range(10000, 20000);
5947 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5948 e1000e_down(adapter);
5949 e1000_free_irq(adapter);
5951 e1000e_reset_interrupt_capability(adapter);
5953 status = er32(STATUS);
5954 if (status & E1000_STATUS_LU)
5955 wufc &= ~E1000_WUFC_LNKC;
5958 e1000_setup_rctl(adapter);
5959 e1000e_set_rx_mode(netdev);
5961 /* turn on all-multi mode if wake on multicast is enabled */
5962 if (wufc & E1000_WUFC_MC) {
5963 rctl = er32(RCTL);
5964 rctl |= E1000_RCTL_MPE;
5965 ew32(RCTL, rctl);
5966 }
5968 ctrl = er32(CTRL);
5969 ctrl |= E1000_CTRL_ADVD3WUC;
5970 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5971 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5972 ew32(CTRL, ctrl);
5974 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5975 adapter->hw.phy.media_type ==
5976 e1000_media_type_internal_serdes) {
5977 /* keep the laser running in D3 */
5978 ctrl_ext = er32(CTRL_EXT);
5979 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5980 ew32(CTRL_EXT, ctrl_ext);
5983 if (adapter->flags & FLAG_IS_ICH)
5984 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5986 /* Allow time for pending master requests to run */
5987 e1000e_disable_pcie_master(&adapter->hw);
5989 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5990 /* enable wakeup by the PHY */
5991 retval = e1000_init_phy_wakeup(adapter, wufc);
5992 if (retval)
5993 return retval;
5994 } else {
5995 /* enable wakeup by the MAC */
5996 ew32(WUFC, wufc);
5997 ew32(WUC, E1000_WUC_PME_EN);
5998 }
5999 } else {
6000 ew32(WUC, 0);
6001 ew32(WUFC, 0);
6002 }
6004 if (adapter->hw.phy.type == e1000_phy_igp_3)
6005 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
6007 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6008 * would have already happened in close and is redundant.
6010 e1000e_release_hw_control(adapter);
6012 pci_clear_master(pdev);
6014 /* The pci-e switch on some quad port adapters will report a
6015 * correctable error when the MAC transitions from D0 to D3. To
6016 * prevent this we need to mask off the correctable errors on the
6017 * downstream port of the pci-e switch.
6019 * We don't have the associated upstream bridge while assigning
6020 * the PCI device into guest. For example, the KVM on power is
6021 * one of the cases.
6022 */
6023 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6024 struct pci_dev *us_dev = pdev->bus->self;
6030 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6031 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6032 (devctl & ~PCI_EXP_DEVCTL_CERE));
6034 pci_save_state(pdev);
6035 pci_prepare_to_sleep(pdev);
6037 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6044 * e1000e_disable_aspm - Disable ASPM states
6045 * @pdev: pointer to PCI device struct
6046 * @state: bit-mask of ASPM states to disable
6048 * Some devices *must* have certain ASPM states disabled per hardware errata.
6050 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6052 struct pci_dev *parent = pdev->bus->self;
6053 u16 aspm_dis_mask = 0;
6054 u16 pdev_aspmc, parent_aspmc;
6056 switch (state) {
6057 case PCIE_LINK_STATE_L0S:
6058 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6059 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6060 /* fall-through - can't have L1 without L0s */
6061 case PCIE_LINK_STATE_L1:
6062 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6063 break;
6064 default:
6065 return;
6066 }
6068 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6069 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6071 if (parent) {
6072 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6073 &parent_aspmc);
6074 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6075 }
6077 /* Nothing to do if the ASPM states to be disabled already are */
6078 if (!(pdev_aspmc & aspm_dis_mask) &&
6079 (!parent || !(parent_aspmc & aspm_dis_mask)))
6082 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6083 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6084 "L0s" : "",
6085 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6086 "L1" : "");
6088 #ifdef CONFIG_PCIEASPM
6089 pci_disable_link_state_locked(pdev, state);
6091 /* Double-check ASPM control. If not disabled by the above, the
6092 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6093 * not enabled); override by writing PCI config space directly.
6095 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6096 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6098 if (!(aspm_dis_mask & pdev_aspmc))
6099 return;
6100 #endif /* CONFIG_PCIEASPM */
6102 /* Both device and parent should have the same ASPM setting.
6103 * Disable ASPM in downstream component first and then upstream.
6105 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6107 if (parent)
6108 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6109 aspm_dis_mask);
6110 }
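/* LNKCTL[1:0] is the ASPM Control field: bit 0 enables L0s and bit 1
 * enables L1 (PCI_EXP_LNKCTL_ASPM_L0S / _L1), so clearing both bits
 * with aspm_dis_mask = 0x3 forces ASPM fully off for the link.
 */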
6113 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
6115 return !!adapter->tx_ring->buffer_info;
6118 static int __e1000_resume(struct pci_dev *pdev)
6120 struct net_device *netdev = pci_get_drvdata(pdev);
6121 struct e1000_adapter *adapter = netdev_priv(netdev);
6122 struct e1000_hw *hw = &adapter->hw;
6123 u16 aspm_disable_flag = 0;
6126 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6127 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6128 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6129 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6130 if (aspm_disable_flag)
6131 e1000e_disable_aspm(pdev, aspm_disable_flag);
6133 pci_set_master(pdev);
6135 e1000e_set_interrupt_capability(adapter);
6136 if (netif_running(netdev)) {
6137 err = e1000_request_irq(adapter);
6138 if (err)
6139 return err;
6140 }
6142 if (hw->mac.type >= e1000_pch2lan)
6143 e1000_resume_workarounds_pchlan(&adapter->hw);
6145 e1000e_power_up_phy(adapter);
6147 /* report the system wakeup cause from S3/S4 */
6148 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6149 u16 phy_data;
6151 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6152 if (phy_data) {
6153 e_info("PHY Wakeup cause - %s\n",
6154 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6155 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6156 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6157 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6158 phy_data & E1000_WUS_LNKC ?
6159 "Link Status Change" : "other");
6160 }
6161 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6162 } else {
6163 u32 wus = er32(WUS);
6164 if (wus) {
6165 e_info("MAC Wakeup cause - %s\n",
6166 wus & E1000_WUS_EX ? "Unicast Packet" :
6167 wus & E1000_WUS_MC ? "Multicast Packet" :
6168 wus & E1000_WUS_BC ? "Broadcast Packet" :
6169 wus & E1000_WUS_MAG ? "Magic Packet" :
6170 wus & E1000_WUS_LNKC ? "Link Status Change" :
6171 "other");
6172 }
6173 ew32(WUS, ~0);
6174 }
6176 e1000e_reset(adapter);
6178 e1000_init_manageability_pt(adapter);
6180 if (netif_running(netdev))
6181 e1000e_up(adapter);
6183 netif_device_attach(netdev);
6185 /* If the controller has AMT, do not set DRV_LOAD until the interface
6186 * is up. For all other cases, let the f/w know that the h/w is now
6187 * under the control of the driver.
6189 if (!(adapter->flags & FLAG_HAS_AMT))
6190 e1000e_get_hw_control(adapter);
6195 #ifdef CONFIG_PM_SLEEP
6196 static int e1000_suspend(struct device *dev)
6198 struct pci_dev *pdev = to_pci_dev(dev);
6200 return __e1000_shutdown(pdev, false);
6203 static int e1000_resume(struct device *dev)
6205 struct pci_dev *pdev = to_pci_dev(dev);
6206 struct net_device *netdev = pci_get_drvdata(pdev);
6207 struct e1000_adapter *adapter = netdev_priv(netdev);
6209 if (e1000e_pm_ready(adapter))
6210 adapter->idle_check = true;
6212 return __e1000_resume(pdev);
6214 #endif /* CONFIG_PM_SLEEP */
6216 #ifdef CONFIG_PM_RUNTIME
6217 static int e1000_runtime_suspend(struct device *dev)
6219 struct pci_dev *pdev = to_pci_dev(dev);
6220 struct net_device *netdev = pci_get_drvdata(pdev);
6221 struct e1000_adapter *adapter = netdev_priv(netdev);
6223 if (!e1000e_pm_ready(adapter))
6226 return __e1000_shutdown(pdev, true);
6229 static int e1000_idle(struct device *dev)
6231 struct pci_dev *pdev = to_pci_dev(dev);
6232 struct net_device *netdev = pci_get_drvdata(pdev);
6233 struct e1000_adapter *adapter = netdev_priv(netdev);
6235 if (!e1000e_pm_ready(adapter))
6238 if (adapter->idle_check) {
6239 adapter->idle_check = false;
6240 if (!e1000e_has_link(adapter))
6241 pm_schedule_suspend(dev, MSEC_PER_SEC);
6247 static int e1000_runtime_resume(struct device *dev)
6249 struct pci_dev *pdev = to_pci_dev(dev);
6250 struct net_device *netdev = pci_get_drvdata(pdev);
6251 struct e1000_adapter *adapter = netdev_priv(netdev);
6253 if (!e1000e_pm_ready(adapter))
6256 adapter->idle_check = !dev->power.runtime_auto;
6257 return __e1000_resume(pdev);
6259 #endif /* CONFIG_PM_RUNTIME */
6260 #endif /* CONFIG_PM */
6262 static void e1000_shutdown(struct pci_dev *pdev)
6264 __e1000_shutdown(pdev, false);
6267 #ifdef CONFIG_NET_POLL_CONTROLLER
6269 static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
6271 struct net_device *netdev = data;
6272 struct e1000_adapter *adapter = netdev_priv(netdev);
6274 if (adapter->msix_entries) {
6275 int vector, msix_irq;
6277 vector = 0;
6278 msix_irq = adapter->msix_entries[vector].vector;
6279 disable_irq(msix_irq);
6280 e1000_intr_msix_rx(msix_irq, netdev);
6281 enable_irq(msix_irq);
6283 vector++;
6284 msix_irq = adapter->msix_entries[vector].vector;
6285 disable_irq(msix_irq);
6286 e1000_intr_msix_tx(msix_irq, netdev);
6287 enable_irq(msix_irq);
6289 vector++;
6290 msix_irq = adapter->msix_entries[vector].vector;
6291 disable_irq(msix_irq);
6292 e1000_msix_other(msix_irq, netdev);
6293 enable_irq(msix_irq);
6294 }
6296 return IRQ_HANDLED;
6297 }
6299 /**
6300 * e1000_netpoll
6301 * @netdev: network interface device structure
6303 * Polling 'interrupt' - used by things like netconsole to send skbs
6304 * without having to re-enable interrupts. It's not called while
6305 * the interrupt routine is executing.
6307 static void e1000_netpoll(struct net_device *netdev)
6309 struct e1000_adapter *adapter = netdev_priv(netdev);
6311 switch (adapter->int_mode) {
6312 case E1000E_INT_MODE_MSIX:
6313 e1000_intr_msix(adapter->pdev->irq, netdev);
6314 break;
6315 case E1000E_INT_MODE_MSI:
6316 disable_irq(adapter->pdev->irq);
6317 e1000_intr_msi(adapter->pdev->irq, netdev);
6318 enable_irq(adapter->pdev->irq);
6319 break;
6320 default: /* E1000E_INT_MODE_LEGACY */
6321 disable_irq(adapter->pdev->irq);
6322 e1000_intr(adapter->pdev->irq, netdev);
6323 enable_irq(adapter->pdev->irq);
6324 break;
6325 }
6326 }
6327 #endif /* CONFIG_NET_POLL_CONTROLLER */
6330 * e1000_io_error_detected - called when PCI error is detected
6331 * @pdev: Pointer to PCI device
6332 * @state: The current pci connection state
6334 * This function is called after a PCI bus error affecting
6335 * this device has been detected.
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000e_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pdev->state_saved = true;
		pci_restore_state(pdev);
		pci_set_master(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		/* clear any pending wake-up status */
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
			 NETIF_F_RXALL)))
		return 0;

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* We need to take it back to defaults, which might mean
			 * stripping is still disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 0;
}
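/* Usage note (not in the original source): the CRC-stripping juggling above
 * is exercised from userspace through ethtool's feature interface, e.g.:
 *
 *   ethtool -K eth0 rx-fcs on    # keep the Ethernet FCS on received frames
 *   ethtool -K eth0 rx-all on    # also pass up frames with a bad FCS
 *
 * ("eth0" is a placeholder interface name).
 */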
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_set_features = e1000_set_features,
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;
	static int cards_found;
	u16 aspm_disable_flag = 0;
	int bars, i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions_exclusive(pdev, bars,
						     e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;
	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* Set default EEE advertisement */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");
	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}
	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/* systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;
	/* Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
				       1, &eeprom_data);
		else
			e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
				       1, &eeprom_data);
	}
	/* fetch WoL from EEPROM */
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* init PTP hardware clock */
	e1000e_ptp_init(adapter);
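	/* Note (assumption based on standard PTP plumbing, not part of the
	 * original source): once e1000e_ptp_init() registers the PTP
	 * hardware clock it appears to userspace as /dev/ptpN, and
	 * "ethtool -T <iface>" reports the clock index and the supported
	 * timestamping modes.
	 */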
	e1000_print_device_info(adapter);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;
err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool down = test_bit(__E1000_DOWN, &adapter->state);

	e1000e_ptp_remove(adapter);

	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}
	if (!(netdev->flags & IFF_UP))
		e1000_power_down_phy(adapter);

	/* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
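/* Recovery flow sketch (informational note, not from the original source):
 * on an uncorrectable PCIe error the AER core walks the three hooks above
 * in order:
 *
 *   e1000_io_error_detected()  ->  PCI_ERS_RESULT_NEED_RESET
 *   e1000_io_slot_reset()      ->  PCI_ERS_RESULT_RECOVERED
 *   e1000_io_resume()          ->  traffic restarts
 *
 * A permanent failure short-circuits to PCI_ERS_RESULT_DISCONNECT instead.
 */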
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
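/* Note (informational): MODULE_DEVICE_TABLE() exports the IDs above as
 * module aliases so udev/modprobe can autoload the driver when a matching
 * device appears; "modinfo e1000e | grep alias" lists them.
 */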
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
			   e1000_idle)
};
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver   = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);