tg3: Skip powering down function 0 on certain serdes devices
[cascardo/linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Test whether feature @flag is set in the driver flag bitmap @bits.
 * Returns nonzero when set.  Type-checked backend for the tg3_flag()
 * macro, which forces callers to use enum TG3_FLAGS values.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
77
/* Set feature @flag in the driver flag bitmap @bits (atomic set_bit).
 * Backend for the tg3_flag_set() macro.
 */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
82
/* Clear feature @flag in the driver flag bitmap @bits (atomic clear_bit).
 * Backend for the tg3_flag_clear() macro.
 */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
87
88 #define tg3_flag(tp, flag)                              \
89         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)                          \
91         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)                        \
93         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     131
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "April 09, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* ethtool -S statistics names.
 *
 * NOTE(review): the position of each entry appears to correspond to a
 * counter in the hardware/driver statistics block, so the order must
 * not be changed without updating the code that fills the values —
 * confirm against the stats-gathering routine elsewhere in this file.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* ethtool self-test names, indexed by the TG3_*_TEST constants above so
 * result slots and labels cannot get out of sync.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Write @val to the MAC register at byte offset @off through the
 * memory-mapped register window.  Posted write; no read-back flush.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
467
/* Read the MAC register at byte offset @off through the memory-mapped
 * register window.
 */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
472
/* Write @val to APE register @off via the separate APE register
 * mapping (tp->aperegs).  Posted write; no read-back flush.
 */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
477
/* Read APE register @off via the separate APE register mapping. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
482
/* Write a MAC register through PCI config space instead of MMIO: the
 * target offset goes into TG3PCI_REG_BASE_ADDR, the data into
 * TG3PCI_REG_DATA.  indirect_lock serializes the two config cycles so
 * concurrent indirect accesses cannot interleave address and data.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
498
/* Read a MAC register through PCI config space (counterpart of
 * tg3_write_indirect_reg32).  indirect_lock keeps the address/data
 * config-cycle pair atomic with respect to other indirect users.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
510
/* Write @val to mailbox register @off when mailboxes must be accessed
 * indirectly through PCI config space.
 *
 * Two mailboxes have dedicated config-space shadow registers and are
 * written directly; all others go through the generic indirect
 * register pair, with mailbox offsets sitting 0x5600 bytes into the
 * register space.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has a config-space shadow. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* As does the standard RX ring producer index. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
/* Read mailbox register @off through PCI config space.  Mailbox
 * offsets sit 0x5600 bytes into the register space (see
 * tg3_write_indirect_mbox); indirect_lock keeps the address/data pair
 * atomic.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
/* Backend for tw32_f()/tw32_wait_f(): write @val to @off and make sure
 * @usec_wait microseconds really elapse before the caller continues.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay before the read-back so the write
		 * has time to take effect, then read to flush it out.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
576
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579         tp->write32_mbox(tp, off, val);
580         if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581             (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582              !tg3_flag(tp, ICH_WORKAROUND)))
583                 tp->read32_mbox(tp, off);
584 }
585
/* Post @val to a TX mailbox.  Chips with the TXD mailbox hardware bug
 * need the value written twice to latch reliably; chips that reorder
 * mailbox writes (or require flushed posted writes) additionally need
 * a read-back to push the write to the device.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
/* 5906 mailbox read: this chip exposes its mailboxes in the GRC
 * mailbox register range, so the offset is rebased onto GRCMBOX_BASE.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906 mailbox write counterpart: rebase @off onto GRCMBOX_BASE. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
607 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val)                  tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC SRAM at offset @off through the memory window.
 *
 * On the 5906, accesses to the statistics-block region of SRAM are
 * silently dropped.  The window is driven either through PCI config
 * space (SRAM_USE_CONFIG) or through flushed MMIO writes, and the
 * window base is always restored to zero afterwards so other code can
 * rely on a zeroed window.  indirect_lock serializes window users.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read a NIC SRAM word at offset @off into *@val through the memory
 * window (counterpart of tg3_write_mem).
 *
 * On the 5906, the statistics-block region of SRAM is not readable;
 * such reads return 0.  The window base is always restored to zero
 * afterwards, and indirect_lock serializes window users.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire APE lock @locknum, arbitrating access with the APE firmware.
 * Non-zero PCI functions request with their own function bit; function
 * 0 uses the generic DRIVER bit.  Returns 0 on success (or when APE is
 * not enabled / the lock does not exist on this chip), -EBUSY if the
 * grant is not observed within ~1ms, -EINVAL for an unknown lock.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to take. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO uses the same per-function bit */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant registers. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
758
/* Release a semaphore previously taken with tg3_ape_lock().  Writing the
 * owner's bit to the GRANT register clears the grant.  No-op when the APE
 * is not enabled or the lock number is not one this driver manages.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through - GPIO lock is released like GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must mirror the bit selection made in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the older single GRANT register. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
796 {
797         u32 apedata;
798
799         while (timeout_us) {
800                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801                         return -EBUSY;
802
803                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805                         break;
806
807                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808
809                 udelay(10);
810                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811         }
812
813         return timeout_us ? 0 : -EBUSY;
814 }
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes of APE scratchpad data starting at @base_off into
 * @data, in chunks bounded by the APE's advertised message-buffer size.
 * Returns 0 on success (or immediately if the APE lacks NCSI support),
 * -ENODEV if the APE shared-memory signature is absent, -EAGAIN if the
 * firmware is not ready or fails to respond, or the error from
 * tg3_ape_event_lock().
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Message buffer layout: two control words (offset, length)
	 * followed by a payload area of up to maxlen bytes.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post a scratchpad-read request for this chunk. */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock() and
		 * kick the APE to process the posted event.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the returned chunk out of the message area,
		 * one 32-bit word at a time.
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post @event to the APE firmware.  Returns 0 on success, -EAGAIN if the
 * APE shared memory is absent or the firmware is not ready, or the error
 * from tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	/* Verify the APE shared-memory signature before touching it. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock taken by tg3_ape_event_lock(), then notify
	 * the APE that a new event is posted.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Tell the APE firmware about a driver state transition (@kind is one of
 * RESET_KIND_INIT/SHUTDOWN/SUSPEND).  Updates the host segment of APE
 * shared memory accordingly and posts a state-change event.  No-op when
 * the APE is not enabled or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, bumped
		 * init count, driver version, and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state so the APE keeps the link alive
		 * when wake-on-LAN is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Re-enable chip interrupts: clear the PCI interrupt mask and unmask each
 * vector's interrupt mailbox with its last processed status tag.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before unmasking the hardware. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' coal_now bits from the cached value. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge completed work up to last_tag via the mailbox. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Switch the core clock configuration via TG3PCI_CLOCK_CTRL, stepping
 * through intermediate settings as required by the chip family.  Skipped
 * entirely on CPMU-equipped and 5780-class parts.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN bits and the low 5-bit field; cache the
	 * result for later restores.
	 */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition through ALTCLK in two writes, waiting 40 usec
		 * after each, before the final target write below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
/* Max polls (10 usec apart) of MI_COM_BUSY before an MDIO op times out */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg at MDIO address @phy_addr through the MAC's MI
 * interface.  Temporarily disables MI auto-polling and takes the PHY APE
 * lock around the transaction.  Returns 0 with *val set, or -EBUSY on
 * timeout (*val is left 0 in that case).
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling owns the MI interface; pause it for manual access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read opcode. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI engine clears its busy bit, then re-read the
	 * register to pick up the returned data.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read @reg on the default PHY (tp->phy_addr).  Returns 0 or -EBUSY. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write @val to PHY register @reg at MDIO address @phy_addr through the
 * MAC's MI interface.  Writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are
 * silently skipped (returning 0) on TG3_PHYFLG_IS_FET PHYs.  Returns 0
 * on success or -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling owns the MI interface; pause it for manual access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI engine clears its busy bit. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write @val to @reg on the default PHY (tp->phy_addr).  Returns 0 or
 * -EBUSY.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331         u32 phy_control;
1332         int limit, err;
1333
1334         /* OK, reset it, and poll the BMCR_RESET bit until it
1335          * clears or we time out.
1336          */
1337         phy_control = BMCR_RESET;
1338         err = tg3_writephy(tp, MII_BMCR, phy_control);
1339         if (err != 0)
1340                 return -EBUSY;
1341
1342         limit = 5000;
1343         while (limit--) {
1344                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345                 if (err != 0)
1346                         return -EBUSY;
1347
1348                 if ((phy_control & BMCR_RESET) == 0) {
1349                         udelay(40);
1350                         break;
1351                 }
1352                 udelay(10);
1353         }
1354         if (limit < 0)
1355                 return -EBUSY;
1356
1357         return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* phylib mii_bus reset callback; this bus needs no reset, so it is a
 * no-op that always succeeds.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the 5785 MAC's PHY-interface registers (LED modes, clock
 * timeouts, RGMII in-band/out-of-band signalling) to match the attached
 * PHY model.  PHY models this driver does not recognize leave the MAC
 * configuration untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: just program LED modes and clock timeouts. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled gets the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Clear all RGMII-mode RX/TX bits, then re-enable the ones
	 * selected by the external in-band RX/TX flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Disable MI auto-polling so the driver can issue manual MDIO accesses,
 * and reapply the 5785 MAC<->PHY interface setup if the mdio bus has
 * already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY's MDIO address and, when phylib is in use, allocate
 * and register the mdio bus and apply PHY-model-specific configuration.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* On 5717+ each PCI function owns PHY address pci_fn + 1;
		 * serdes devices sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id encodes PCI bus number and devfn for uniqueness. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply PHY-model-specific interface mode and workaround flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
/* Max time (usec) to wait for firmware to ack the previous driver event */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from the last
 * event) for the RX CPU to clear GRC_RX_CPU_DRIVER_EVENT, i.e. to ack
 * the previous driver->firmware event.  Returns immediately when enough
 * wall time has already elapsed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert the remaining budget to 8-usec poll iterations. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1641
/* tp->lock is held. */
/* Snapshot four words of PHY state into data[0..3] for the UMP link
 * report: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 (0 for MII-serdes
 * PHYs), and PHYADDR in the high half of the last word.  Registers that
 * fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680         u32 data[4];
1681
1682         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683                 return;
1684
1685         tg3_phy_gather_ump_data(tp, data);
1686
1687         tg3_wait_for_event_ack(tp);
1688
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696         tg3_generate_fw_event(tp);
1697 }
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
/* Wait for the on-chip firmware to finish its boot sequence after a
 * reset.  Returns 0 on success (or when no firmware is expected) and
 * -ENODEV only for the 5906 if its VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The firmware
	 * writes the one's complement of the magic signature into the
	 * mailbox when it is done; poll up to ~1 second for it.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1878 {
1879         u32 flowctrl = 0;
1880
1881         if (adv & ADVERTISE_PAUSE_CAP) {
1882                 flowctrl |= FLOW_CTRL_RX;
1883                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884                         flowctrl |= FLOW_CTRL_TX;
1885         } else if (adv & ADVERTISE_PAUSE_ASYM)
1886                 flowctrl |= FLOW_CTRL_TX;
1887
1888         return flowctrl;
1889 }
1890
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1892 {
1893         u16 miireg;
1894
1895         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896                 miireg = ADVERTISE_1000XPAUSE;
1897         else if (flow_ctrl & FLOW_CTRL_TX)
1898                 miireg = ADVERTISE_1000XPSE_ASYM;
1899         else if (flow_ctrl & FLOW_CTRL_RX)
1900                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1901         else
1902                 miireg = 0;
1903
1904         return miireg;
1905 }
1906
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1908 {
1909         u32 flowctrl = 0;
1910
1911         if (adv & ADVERTISE_1000XPAUSE) {
1912                 flowctrl |= FLOW_CTRL_RX;
1913                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914                         flowctrl |= FLOW_CTRL_TX;
1915         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916                 flowctrl |= FLOW_CTRL_TX;
1917
1918         return flowctrl;
1919 }
1920
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1922 {
1923         u8 cap = 0;
1924
1925         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928                 if (lcladv & ADVERTISE_1000XPAUSE)
1929                         cap = FLOW_CTRL_RX;
1930                 if (rmtadv & ADVERTISE_1000XPAUSE)
1931                         cap = FLOW_CTRL_TX;
1932         }
1933
1934         return cap;
1935 }
1936
/* Resolve the active flow-control configuration from the given local
 * and remote advertisements (or the forced setting when pause autoneg
 * is off) and apply it to the MAC RX/TX mode registers.  The registers
 * are only rewritten when their value actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib in charge, the autoneg state lives in the phydev. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause resolution; copper
		 * uses the generic MII helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1975
/* phylib adjust_link callback: propagate the PHY's current link state
 * (speed/duplex/pause) into the MAC mode, MI status and TX lengths
 * registers, then emit a link report if anything visible changed.
 * The report is deferred until after tp->lock is released.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Port mode: MII for 10/100; GMII for gigabit.  On the
		 * 5785, sub-gigabit speeds also use MII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather both sides' pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when the value changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Gigabit half duplex needs a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link, speed, duplex or flow control changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
2059
/* Attach the MAC to its PHY through phylib and restrict the advertised
 * feature set to what the MAC supports.  Idempotent: returns 0 at once
 * if a PHY is already connected.  Returns a negative errno on failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall back to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2107
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110         struct phy_device *phydev;
2111
2112         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113                 return;
2114
2115         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116
2117         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119                 phydev->speed = tp->link_config.speed;
2120                 phydev->duplex = tp->link_config.duplex;
2121                 phydev->autoneg = tp->link_config.autoneg;
2122                 phydev->advertising = tp->link_config.advertising;
2123         }
2124
2125         phy_start(phydev);
2126
2127         phy_start_aneg(phydev);
2128 }
2129
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133                 return;
2134
2135         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143         }
2144 }
2145
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148         int err;
2149         u32 val;
2150
2151         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152                 return 0;
2153
2154         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155                 /* Cannot do read-modify-write on 5401 */
2156                 err = tg3_phy_auxctl_write(tp,
2157                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159                                            0x4c20);
2160                 goto done;
2161         }
2162
2163         err = tg3_phy_auxctl_read(tp,
2164                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165         if (err)
2166                 return err;
2167
2168         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169         err = tg3_phy_auxctl_write(tp,
2170                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171
2172 done:
2173         return err;
2174 }
2175
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178         u32 phytest;
2179
2180         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181                 u32 phy;
2182
2183                 tg3_writephy(tp, MII_TG3_FET_TEST,
2184                              phytest | MII_TG3_FET_SHADOW_EN);
2185                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186                         if (enable)
2187                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188                         else
2189                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191                 }
2192                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193         }
2194 }
2195
/* Enable or disable the PHY's APD (auto power-down) feature via the
 * MISC shadow registers.  Not applicable to pre-5705 parts, nor to
 * MII-serdes PHYs on 5717-plus devices; FET PHYs use their own shadow
 * register layout and are delegated to tg3_phy_fet_toggle_apd().
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First write: SCR5 power-saving controls.  DLLAPD is left out
	 * only on the 5784 when APD is being enabled.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second write: APD select with an 84ms wake timer, plus the
	 * enable bit when requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2230
/* Enable or disable automatic MDI/MDI-X crossover detection.  Only
 * meaningful on 5705-plus copper PHYs; serdes links return at once.
 * FET PHYs use the shadow MISCCTRL register, all others the auxiliary
 * control MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Expose the shadow bank, flip the MDIX bit,
			 * then restore normal register visibility.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write the force-AMDIX bit in the auxctl
		 * MISC shadow register.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2271
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274         int ret;
2275         u32 val;
2276
2277         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278                 return;
2279
2280         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281         if (!ret)
2282                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285
/* Program PHY DSP coefficients from the chip's OTP (one-time
 * programmable) word.  Each field is extracted from tp->phy_otp with a
 * mask/shift pair and written to the matching DSP register.  No-op when
 * no OTP data was read, or if the SMDSP cannot be enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP registers are only writable with the SMDSP clock on. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn the SMDSP clock back off. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2322
/* Adjust EEE (per TG3_PHYFLG_EEE_CAP) state after a link change.
 * When an eligible full-duplex 100/1000 autoneg link is up and the
 * link partner resolved EEE, arm tp->setlpicnt so LPI gets enabled
 * later; otherwise clear the DSP TAP26 bits and turn LPI off now.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status to see what
		 * the link partner agreed to.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not resolved: clear TAP26 (needs the SMDSP clock)
		 * and disable LPI in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2365
/* Turn LPI (low power idle) on in the CPMU EEE mode register.  On
 * 5717/5719/57765-class devices running at gigabit speed, the DSP
 * TAP26 ALNOKO/RMRXSTO bits are programmed first (requires the SMDSP
 * clock).
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2384
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2386 {
2387         int limit = 100;
2388
2389         while (limit--) {
2390                 u32 tmp32;
2391
2392                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393                         if ((tmp32 & 0x1000) == 0)
2394                                 break;
2395                 }
2396         }
2397         if (limit < 0)
2398                 return -EBUSY;
2399
2400         return 0;
2401 }
2402
/* Write a fixed test pattern into each of the PHY DSP's four channels
 * and read it back for verification.  On any handshake timeout *resetp
 * is set so the caller reruns the sequence after a PHY reset; on a
 * data mismatch a recovery write sequence is issued instead.  Returns
 * 0 when all four channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: six words, written low/high interleaved. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against the
		 * pattern (masked to the significant bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the recovery writes
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2468
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2470 {
2471         int chan;
2472
2473         for (chan = 0; chan < 4; chan++) {
2474                 int i;
2475
2476                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477                              (chan * 0x2000) | 0x0200);
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479                 for (i = 0; i < 6; i++)
2480                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482                 if (tg3_wait_macro_done(tp))
2483                         return -EBUSY;
2484         }
2485
2486         return 0;
2487 }
2488
/* Work around PHY DSP instability on 5703/5704/5705: repeatedly reset
 * the PHY and rewrite the DSP test pattern until it reads back
 * correctly (up to 10 attempts), then clear the channel pattern and
 * restore the caller-visible PHY state.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): if every iteration bails out via "continue" (readphy
 * failures) until retries are exhausted, phy9_orig is written back
 * below without ever having been read, and a persistent test-pattern
 * failure is masked by the later err assignments -- confirm whether
 * this can occur in practice.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		/* Sets do_phy_reset when a full PHY reset is needed
		 * before the next attempt.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the DSP channel test pattern written above. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2555
2556 static void tg3_carrier_off(struct tg3 *tp)
2557 {
2558         netif_carrier_off(tp->dev);
2559         tp->link_up = false;
2560 }
2561
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563 {
2564         if (tg3_flag(tp, ENABLE_ASF))
2565                 netdev_warn(tp->dev,
2566                             "Management side-band traffic will be interrupted during phy settings change\n");
2567 }
2568
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * workarounds (DSP fixups, CPMU clock adjustments, jumbo-frame
 * settings, output voltage, MDI-X and wirespeed).
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Take the ethernet PHY out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads to confirm the PHY is responding. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Drop carrier and report link-down before resetting. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset dance. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB-RX-only CPMU setting
		 * around the reset; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock selection if it is set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining fixups do not apply to 5717+ MII serdes
	 * devices.
	 */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP workaround for the ADC bug. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice on purpose. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		/* DSP workaround for the bit-error-rate bug. */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		/* DSP workaround for the jitter bug, with optional
		 * trim adjustment.
		 */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2712
/* Inter-function GPIO power-handshake messages.
 *
 * The four PCI functions of a device share one status word (the APE
 * GPIO_MSG register on 5717/5719, TG3_CPMU_DRV_STATUS otherwise; see
 * tg3_set_function_status()).  Each function owns a 4-bit nibble at
 * shift 4 * pci_fn; the *_ALL_* masks select one message bit across
 * all four nibbles.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2728
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2730 {
2731         u32 status, shift;
2732
2733         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734             tg3_asic_rev(tp) == ASIC_REV_5719)
2735                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2736         else
2737                 status = tr32(TG3_CPMU_DRV_STATUS);
2738
2739         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740         status &= ~(TG3_GPIO_MSG_MASK << shift);
2741         status |= (newstat << shift);
2742
2743         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744             tg3_asic_rev(tp) == ASIC_REV_5719)
2745                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2746         else
2747                 tw32(TG3_CPMU_DRV_STATUS, status);
2748
2749         return status >> TG3_APE_GPIO_MSG_SHIFT;
2750 }
2751
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2753 {
2754         if (!tg3_flag(tp, IS_NIC))
2755                 return 0;
2756
2757         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759             tg3_asic_rev(tp) == ASIC_REV_5720) {
2760                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761                         return -EIO;
2762
2763                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2764
2765                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2769         } else {
2770                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2772         }
2773
2774         return 0;
2775 }
2776
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2778 {
2779         u32 grc_local_ctrl;
2780
2781         if (!tg3_flag(tp, IS_NIC) ||
2782             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783             tg3_asic_rev(tp) == ASIC_REV_5701)
2784                 return;
2785
2786         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2787
2788         tw32_wait_f(GRC_LOCAL_CTRL,
2789                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2791
2792         tw32_wait_f(GRC_LOCAL_CTRL,
2793                     grc_local_ctrl,
2794                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796         tw32_wait_f(GRC_LOCAL_CTRL,
2797                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 }
2800
/* Switch the board power source to auxiliary power (Vaux), e.g. so
 * WoL or management firmware stays powered.  The GPIO sequence is
 * chip-dependent and the ordering plus the delay after every
 * GRC_LOCAL_CTRL write matter.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: single write raising GPIO 0 and 1. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO 2 (physically GPIO 0 on this device). */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Then drop GPIO 0 (physically GPIO 2). */
		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Next raise GPIO 0 with the rest held steady. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO 2 where it is usable. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2877
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2879 {
2880         u32 msg = 0;
2881
2882         /* Serialize power state transitions */
2883         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2884                 return;
2885
2886         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887                 msg = TG3_GPIO_MSG_NEED_VAUX;
2888
2889         msg = tg3_set_function_status(tp, msg);
2890
2891         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2892                 goto done;
2893
2894         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895                 tg3_pwrsrc_switch_to_vaux(tp);
2896         else
2897                 tg3_pwrsrc_die_with_vmain(tp);
2898
2899 done:
2900         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2901 }
2902
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2904 {
2905         bool need_vaux = false;
2906
2907         /* The GPIOs do something completely different on 57765. */
2908         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2909                 return;
2910
2911         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913             tg3_asic_rev(tp) == ASIC_REV_5720) {
2914                 tg3_frob_aux_power_5717(tp, include_wol ?
2915                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2916                 return;
2917         }
2918
2919         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920                 struct net_device *dev_peer;
2921
2922                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2923
2924                 /* remove_one() may have been run on the peer. */
2925                 if (dev_peer) {
2926                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2927
2928                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2929                                 return;
2930
2931                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932                             tg3_flag(tp_peer, ENABLE_ASF))
2933                                 need_vaux = true;
2934                 }
2935         }
2936
2937         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938             tg3_flag(tp, ENABLE_ASF))
2939                 need_vaux = true;
2940
2941         if (need_vaux)
2942                 tg3_pwrsrc_switch_to_vaux(tp);
2943         else
2944                 tg3_pwrsrc_die_with_vmain(tp);
2945 }
2946
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2948 {
2949         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2950                 return 1;
2951         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952                 if (speed != SPEED_10)
2953                         return 1;
2954         } else if (speed == SPEED_10)
2955                 return 1;
2956
2957         return 0;
2958 }
2959
2960 static bool tg3_phy_power_bug(struct tg3 *tp)
2961 {
2962         switch (tg3_asic_rev(tp)) {
2963         case ASIC_REV_5700:
2964         case ASIC_REV_5704:
2965                 return true;
2966         case ASIC_REV_5780:
2967                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2968                         return true;
2969                 return false;
2970         case ASIC_REV_5717:
2971                 if (!tp->pci_fn)
2972                         return true;
2973                 return false;
2974         case ASIC_REV_5719:
2975         case ASIC_REV_5720:
2976                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2977                     !tp->pci_fn)
2978                         return true;
2979                 return false;
2980         }
2981
2982         return false;
2983 }
2984
/* Power down the PHY (or place it in its lowest safe power state)
 * before suspending the device.  @do_low_power additionally programs
 * the aux-control low-power settings on ordinary copper PHYs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	/* The link must stay up (e.g. for management traffic). */
	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open shadow-register access to set the
			 * standby-power-down bit in AUXMODE4, then
			 * restore the original test-register value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Select the 12.5MHz MAC clock before powering down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3057
3058 /* tp->lock is held. */
3059 static int tg3_nvram_lock(struct tg3 *tp)
3060 {
3061         if (tg3_flag(tp, NVRAM)) {
3062                 int i;
3063
3064                 if (tp->nvram_lock_cnt == 0) {
3065                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3066                         for (i = 0; i < 8000; i++) {
3067                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3068                                         break;
3069                                 udelay(20);
3070                         }
3071                         if (i == 8000) {
3072                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3073                                 return -ENODEV;
3074                         }
3075                 }
3076                 tp->nvram_lock_cnt++;
3077         }
3078         return 0;
3079 }
3080
3081 /* tp->lock is held. */
3082 static void tg3_nvram_unlock(struct tg3 *tp)
3083 {
3084         if (tg3_flag(tp, NVRAM)) {
3085                 if (tp->nvram_lock_cnt > 0)
3086                         tp->nvram_lock_cnt--;
3087                 if (tp->nvram_lock_cnt == 0)
3088                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3089         }
3090 }
3091
3092 /* tp->lock is held. */
3093 static void tg3_enable_nvram_access(struct tg3 *tp)
3094 {
3095         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3096                 u32 nvaccess = tr32(NVRAM_ACCESS);
3097
3098                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3099         }
3100 }
3101
3102 /* tp->lock is held. */
3103 static void tg3_disable_nvram_access(struct tg3 *tp)
3104 {
3105         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3106                 u32 nvaccess = tr32(NVRAM_ACCESS);
3107
3108                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3109         }
3110 }
3111
/* Read one 32-bit word from the legacy SEEPROM interface at @offset.
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Kick off a read at the requested address. */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll (up to ~1s) for the read to complete. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3151
3152 #define NVRAM_CMD_TIMEOUT 10000
3153
3154 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3155 {
3156         int i;
3157
3158         tw32(NVRAM_CMD, nvram_cmd);
3159         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3160                 udelay(10);
3161                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3162                         udelay(10);
3163                         break;
3164                 }
3165         }
3166
3167         if (i == NVRAM_CMD_TIMEOUT)
3168                 return -EBUSY;
3169
3170         return 0;
3171 }
3172
3173 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3174 {
3175         if (tg3_flag(tp, NVRAM) &&
3176             tg3_flag(tp, NVRAM_BUFFERED) &&
3177             tg3_flag(tp, FLASH) &&
3178             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3179             (tp->nvram_jedecnum == JEDEC_ATMEL))
3180
3181                 addr = ((addr / tp->nvram_pagesize) <<
3182                         ATMEL_AT45DB0X1B_PAGE_POS) +
3183                        (addr % tp->nvram_pagesize);
3184
3185         return addr;
3186 }
3187
3188 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3189 {
3190         if (tg3_flag(tp, NVRAM) &&
3191             tg3_flag(tp, NVRAM_BUFFERED) &&
3192             tg3_flag(tp, FLASH) &&
3193             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3194             (tp->nvram_jedecnum == JEDEC_ATMEL))
3195
3196                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3197                         tp->nvram_pagesize) +
3198                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3199
3200         return addr;
3201 }
3202
3203 /* NOTE: Data read in from NVRAM is byteswapped according to
3204  * the byteswapping settings for all other register accesses.
3205  * tg3 devices are BE devices, so on a BE machine, the data
3206  * returned will be exactly as it is seen in NVRAM.  On a LE
3207  * machine, the 32-bit value will be byteswapped.
3208  */
3209 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3210 {
3211         int ret;
3212
3213         if (!tg3_flag(tp, NVRAM))
3214                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3215
3216         offset = tg3_nvram_phys_addr(tp, offset);
3217
3218         if (offset > NVRAM_ADDR_MSK)
3219                 return -EINVAL;
3220
3221         ret = tg3_nvram_lock(tp);
3222         if (ret)
3223                 return ret;
3224
3225         tg3_enable_nvram_access(tp);
3226
3227         tw32(NVRAM_ADDR, offset);
3228         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3229                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3230
3231         if (ret == 0)
3232                 *val = tr32(NVRAM_RDDATA);
3233
3234         tg3_disable_nvram_access(tp);
3235
3236         tg3_nvram_unlock(tp);
3237
3238         return ret;
3239 }
3240
3241 /* Ensures NVRAM data is in bytestream format. */
3242 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3243 {
3244         u32 v;
3245         int res = tg3_nvram_read(tp, offset, &v);
3246         if (!res)
3247                 *val = cpu_to_be32(v);
3248         return res;
3249 }
3250
/* Write @len bytes from @buf to the legacy SEEPROM interface, one
 * 32-bit word at a time, polling for completion after each word.
 * @offset and @len are expected to be dword aligned by the caller.
 * Returns 0 on success or -EBUSY when a word fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first -- presumably this acks
		 * (clears) the previous completion status; NOTE(review)
		 * confirm against the register spec.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		/* Start the write at the target address. */
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll (up to ~1s) for the write to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3299
3300 /* offset and length are dword aligned */
3301 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3302                 u8 *buf)
3303 {
3304         int ret = 0;
3305         u32 pagesize = tp->nvram_pagesize;
3306         u32 pagemask = pagesize - 1;
3307         u32 nvram_cmd;
3308         u8 *tmp;
3309
3310         tmp = kmalloc(pagesize, GFP_KERNEL);
3311         if (tmp == NULL)
3312                 return -ENOMEM;
3313
3314         while (len) {
3315                 int j;
3316                 u32 phy_addr, page_off, size;
3317
3318                 phy_addr = offset & ~pagemask;
3319
3320                 for (j = 0; j < pagesize; j += 4) {
3321                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3322                                                   (__be32 *) (tmp + j));
3323                         if (ret)
3324                                 break;
3325                 }
3326                 if (ret)
3327                         break;
3328
3329                 page_off = offset & pagemask;
3330                 size = pagesize;
3331                 if (len < size)
3332                         size = len;
3333
3334                 len -= size;
3335
3336                 memcpy(tmp + page_off, buf, size);
3337
3338                 offset = offset + (pagesize - page_off);
3339
3340                 tg3_enable_nvram_access(tp);
3341
3342                 /*
3343                  * Before we can erase the flash page, we need
3344                  * to issue a special "write enable" command.
3345                  */
3346                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3347
3348                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3349                         break;
3350
3351                 /* Erase the target page */
3352                 tw32(NVRAM_ADDR, phy_addr);
3353
3354                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3355                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3356
3357                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3358                         break;
3359
3360                 /* Issue another write enable to start the write. */
3361                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3362
3363                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3364                         break;
3365
3366                 for (j = 0; j < pagesize; j += 4) {
3367                         __be32 data;
3368
3369                         data = *((__be32 *) (tmp + j));
3370
3371                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3372
3373                         tw32(NVRAM_ADDR, phy_addr + j);
3374
3375                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3376                                 NVRAM_CMD_WR;
3377
3378                         if (j == 0)
3379                                 nvram_cmd |= NVRAM_CMD_FIRST;
3380                         else if (j == (pagesize - 4))
3381                                 nvram_cmd |= NVRAM_CMD_LAST;
3382
3383                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3384                         if (ret)
3385                                 break;
3386                 }
3387                 if (ret)
3388                         break;
3389         }
3390
3391         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3392         tg3_nvram_exec_cmd(tp, nvram_cmd);
3393
3394         kfree(tmp);
3395
3396         return ret;
3397 }
3398
/* offset and length are dword aligned */
/* Write 'len' bytes from 'buf' to buffered flash or EEPROM at 'offset',
 * one dword per NVRAM command.  Returns 0 or a negative errno from
 * tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Latch the next dword into the write-data register. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Offset within the current flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag page boundaries and the final dword of the
		 * transfer for the NVRAM controller.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Reload NVRAM_ADDR only when needed; on 57765+ flash
		 * the address is presumably kept by the controller
		 * mid-burst (the code skips reloading it otherwise).
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST flash parts need an explicit write-enable
		 * before the first dword of each page.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3453
3454 /* offset and length are dword aligned */
3455 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3456 {
3457         int ret;
3458
3459         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3460                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3461                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3462                 udelay(40);
3463         }
3464
3465         if (!tg3_flag(tp, NVRAM)) {
3466                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3467         } else {
3468                 u32 grc_mode;
3469
3470                 ret = tg3_nvram_lock(tp);
3471                 if (ret)
3472                         return ret;
3473
3474                 tg3_enable_nvram_access(tp);
3475                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3476                         tw32(NVRAM_WRITE1, 0x406);
3477
3478                 grc_mode = tr32(GRC_MODE);
3479                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3480
3481                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3482                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3483                                 buf);
3484                 } else {
3485                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3486                                 buf);
3487                 }
3488
3489                 grc_mode = tr32(GRC_MODE);
3490                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3491
3492                 tg3_disable_nvram_access(tp);
3493                 tg3_nvram_unlock(tp);
3494         }
3495
3496         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3497                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3498                 udelay(40);
3499         }
3500
3501         return ret;
3502 }
3503
/* On-chip scratch memory windows used when loading RX/TX CPU firmware. */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3508
/* tp->lock is held. */
/* Request a halt of the on-chip CPU at 'cpu_base' and poll until the
 * HALT bit reads back set.  Returns 0 on success, -EBUSY if the CPU
 * never reports halted.
 */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		/* Clear all CPU state bits, then order a halt, then
		 * check whether the halt took effect.
		 */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
	}

	return (i == iters) ? -EBUSY : 0;
}
3524
/* tp->lock is held. */
/* Halt the RX CPU.  Regardless of the poll result, issue one final
 * flushed halt request and give the CPU a moment to settle.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3536
3537 /* tp->lock is held. */
3538 static int tg3_txcpu_pause(struct tg3 *tp)
3539 {
3540         return tg3_pause_cpu(tp, TX_CPU_BASE);
3541 }
3542
/* tp->lock is held. */
/* Resume the on-chip CPU at 'cpu_base': clear its state bits and drop
 * all mode bits (including HALT) with a flushed write.
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3549
3550 /* tp->lock is held. */
3551 static void tg3_rxcpu_resume(struct tg3 *tp)
3552 {
3553         tg3_resume_cpu(tp, RX_CPU_BASE);
3554 }
3555
/* tp->lock is held. */
/* Halt the RX or TX on-chip CPU.  Returns 0 on success, -ENODEV if the
 * CPU fails to stop.  Halting the TX CPU on 5705+ parts is invalid
 * (enforced by the BUG_ON below).
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its VCPU through a dedicated GRC control
		 * bit rather than the generic CPU_MODE register.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3593
3594 static int tg3_fw_data_len(struct tg3 *tp,
3595                            const struct tg3_firmware_hdr *fw_hdr)
3596 {
3597         int fw_len;
3598
3599         /* Non fragmented firmware have one firmware header followed by a
3600          * contiguous chunk of data to be written. The length field in that
3601          * header is not the length of data to be written but the complete
3602          * length of the bss. The data length is determined based on
3603          * tp->fw->size minus headers.
3604          *
3605          * Fragmented firmware have a main header followed by multiple
3606          * fragments. Each fragment is identical to non fragmented firmware
3607          * with a firmware header followed by a contiguous chunk of data. In
3608          * the main header, the length field is unused and set to 0xffffffff.
3609          * In each fragment header the length is the entire size of that
3610          * fragment i.e. fragment data + header length. Data length is
3611          * therefore length field in the header minus TG3_FW_HDR_LEN.
3612          */
3613         if (tp->fw_len == 0xffffffff)
3614                 fw_len = be32_to_cpu(fw_hdr->len);
3615         else
3616                 fw_len = tp->fw->size;
3617
3618         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3619 }
3620
/* tp->lock is held. */
/* Copy the firmware payload(s) from 'fw_hdr' into the CPU's scratch
 * memory.  On non-57766 parts the target CPU is halted and its scratch
 * area zeroed first; on 57766 the caller has already paused the CPU and
 * the firmware is fragmented.  Returns 0 or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* 5705+ parts have no loadable TX CPU. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the scratch-memory write method for this chip family. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area and keep the CPU halted while
		 * the firmware is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Write each payload dword at the fragment's base address
	 * (low 16 bits), walking the fragments until the total length
	 * is consumed.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3686
/* tp->lock is held. */
/* Set the CPU's program counter to 'pc', retrying up to 'iters' times
 * with an explicit halt if the PC does not read back as written.
 * Returns 0 on success, -EBUSY if the PC never sticks.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		/* PC did not take; halt again and rewrite it. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3707
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both on-chip CPUs, then
 * start only the RX CPU at the firmware's entry point.  Returns 0 or a
 * negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3749
/* Check that the boot code has entered its service loop and that no
 * other patch has been downloaded.  Returns 0 when it is safe to load a
 * service patch, -EBUSY if boot code never becomes ready, -EEXIST if
 * another patch is present.
 */
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	/* A nonzero low byte in the handshake register means some other
	 * patch already occupies the CPU.
	 */
	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3780
/* tp->lock is held. */
/* Download the 57766 service-patch firmware, if present and applicable.
 * Best-effort: bails out silently on any precondition failure.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only devices with the NO_NVRAM flag set take this path. */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	/* No firmware blob loaded; nothing to download. */
	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	/* Let the patched RX CPU run again. */
	tg3_rxcpu_resume(tp);
}
3821
/* tp->lock is held. */
/* Load the TSO firmware into the appropriate on-chip CPU and start it.
 * No-op (returns 0) when the device does not use firmware TSO.
 * Returns 0 or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 runs TSO firmware on the RX CPU out of the mbuf pool;
	 * other parts use the TX CPU scratch memory.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3871
3872
3873 /* tp->lock is held. */
3874 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3875 {
3876         u32 addr_high, addr_low;
3877         int i;
3878
3879         addr_high = ((tp->dev->dev_addr[0] << 8) |
3880                      tp->dev->dev_addr[1]);
3881         addr_low = ((tp->dev->dev_addr[2] << 24) |
3882                     (tp->dev->dev_addr[3] << 16) |
3883                     (tp->dev->dev_addr[4] <<  8) |
3884                     (tp->dev->dev_addr[5] <<  0));
3885         for (i = 0; i < 4; i++) {
3886                 if (i == 1 && skip_mac_1)
3887                         continue;
3888                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3889                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3890         }
3891
3892         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3893             tg3_asic_rev(tp) == ASIC_REV_5704) {
3894                 for (i = 0; i < 12; i++) {
3895                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3896                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3897                 }
3898         }
3899
3900         addr_high = (tp->dev->dev_addr[0] +
3901                      tp->dev->dev_addr[1] +
3902                      tp->dev->dev_addr[2] +
3903                      tp->dev->dev_addr[3] +
3904                      tp->dev->dev_addr[4] +
3905                      tp->dev->dev_addr[5]) &
3906                 TX_BACKOFF_SEED_MASK;
3907         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3908 }
3909
/* Restore the cached TG3PCI_MISC_HOST_CTRL value into PCI config space
 * so that subsequent register accesses work (e.g. after a power state
 * transition — see tg3_power_up()).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3919
3920 static int tg3_power_up(struct tg3 *tp)
3921 {
3922         int err;
3923
3924         tg3_enable_register_access(tp);
3925
3926         err = pci_set_power_state(tp->pdev, PCI_D0);
3927         if (!err) {
3928                 /* Switch out of Vaux if it is a NIC */
3929                 tg3_pwrsrc_switch_to_vmain(tp);
3930         } else {
3931                 netdev_err(tp->dev, "Transition to D0 failed\n");
3932         }
3933
3934         return err;
3935 }
3936
3937 static int tg3_setup_phy(struct tg3 *, bool);
3938
3939 static int tg3_power_down_prepare(struct tg3 *tp)
3940 {
3941         u32 misc_host_ctrl;
3942         bool device_should_wake, do_low_power;
3943
3944         tg3_enable_register_access(tp);
3945
3946         /* Restore the CLKREQ setting. */
3947         if (tg3_flag(tp, CLKREQ_BUG))
3948                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3949                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3950
3951         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3952         tw32(TG3PCI_MISC_HOST_CTRL,
3953              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3954
3955         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3956                              tg3_flag(tp, WOL_ENABLE);
3957
3958         if (tg3_flag(tp, USE_PHYLIB)) {
3959                 do_low_power = false;
3960                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3961                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3962                         struct phy_device *phydev;
3963                         u32 phyid, advertising;
3964
3965                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3966
3967                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3968
3969                         tp->link_config.speed = phydev->speed;
3970                         tp->link_config.duplex = phydev->duplex;
3971                         tp->link_config.autoneg = phydev->autoneg;
3972                         tp->link_config.advertising = phydev->advertising;
3973
3974                         advertising = ADVERTISED_TP |
3975                                       ADVERTISED_Pause |
3976                                       ADVERTISED_Autoneg |
3977                                       ADVERTISED_10baseT_Half;
3978
3979                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3980                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3981                                         advertising |=
3982                                                 ADVERTISED_100baseT_Half |
3983                                                 ADVERTISED_100baseT_Full |
3984                                                 ADVERTISED_10baseT_Full;
3985                                 else
3986                                         advertising |= ADVERTISED_10baseT_Full;
3987                         }
3988
3989                         phydev->advertising = advertising;
3990
3991                         phy_start_aneg(phydev);
3992
3993                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3994                         if (phyid != PHY_ID_BCMAC131) {
3995                                 phyid &= PHY_BCM_OUI_MASK;
3996                                 if (phyid == PHY_BCM_OUI_1 ||
3997                                     phyid == PHY_BCM_OUI_2 ||
3998                                     phyid == PHY_BCM_OUI_3)
3999                                         do_low_power = true;
4000                         }
4001                 }
4002         } else {
4003                 do_low_power = true;
4004
4005                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4006                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4007
4008                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4009                         tg3_setup_phy(tp, false);
4010         }
4011
4012         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4013                 u32 val;
4014
4015                 val = tr32(GRC_VCPU_EXT_CTRL);
4016                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4017         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4018                 int i;
4019                 u32 val;
4020
4021                 for (i = 0; i < 200; i++) {
4022                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4023                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4024                                 break;
4025                         msleep(1);
4026                 }
4027         }
4028         if (tg3_flag(tp, WOL_CAP))
4029                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4030                                                      WOL_DRV_STATE_SHUTDOWN |
4031                                                      WOL_DRV_WOL |
4032                                                      WOL_SET_MAGIC_PKT);
4033
4034         if (device_should_wake) {
4035                 u32 mac_mode;
4036
4037                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4038                         if (do_low_power &&
4039                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4040                                 tg3_phy_auxctl_write(tp,
4041                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4042                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4043                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4044                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4045                                 udelay(40);
4046                         }
4047
4048                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4049                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4050                         else if (tp->phy_flags &
4051                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4052                                 if (tp->link_config.active_speed == SPEED_1000)
4053                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4054                                 else
4055                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4056                         } else
4057                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4058
4059                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4060                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4061                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4062                                              SPEED_100 : SPEED_10;
4063                                 if (tg3_5700_link_polarity(tp, speed))
4064                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4065                                 else
4066                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4067                         }
4068                 } else {
4069                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4070                 }
4071
4072                 if (!tg3_flag(tp, 5750_PLUS))
4073                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4074
4075                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4076                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4077                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4078                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4079
4080                 if (tg3_flag(tp, ENABLE_APE))
4081                         mac_mode |= MAC_MODE_APE_TX_EN |
4082                                     MAC_MODE_APE_RX_EN |
4083                                     MAC_MODE_TDE_ENABLE;
4084
4085                 tw32_f(MAC_MODE, mac_mode);
4086                 udelay(100);
4087
4088                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4089                 udelay(10);
4090         }
4091
4092         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4093             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4094              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4095                 u32 base_val;
4096
4097                 base_val = tp->pci_clock_ctrl;
4098                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4099                              CLOCK_CTRL_TXCLK_DISABLE);
4100
4101                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4102                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4103         } else if (tg3_flag(tp, 5780_CLASS) ||
4104                    tg3_flag(tp, CPMU_PRESENT) ||
4105                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4106                 /* do nothing */
4107         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4108                 u32 newbits1, newbits2;
4109
4110                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4111                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4112                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4113                                     CLOCK_CTRL_TXCLK_DISABLE |
4114                                     CLOCK_CTRL_ALTCLK);
4115                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4116                 } else if (tg3_flag(tp, 5705_PLUS)) {
4117                         newbits1 = CLOCK_CTRL_625_CORE;
4118                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4119                 } else {
4120                         newbits1 = CLOCK_CTRL_ALTCLK;
4121                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4122                 }
4123
4124                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4125                             40);
4126
4127                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4128                             40);
4129
4130                 if (!tg3_flag(tp, 5705_PLUS)) {
4131                         u32 newbits3;
4132
4133                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4134                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4135                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4136                                             CLOCK_CTRL_TXCLK_DISABLE |
4137                                             CLOCK_CTRL_44MHZ_CORE);
4138                         } else {
4139                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4140                         }
4141
4142                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4143                                     tp->pci_clock_ctrl | newbits3, 40);
4144                 }
4145         }
4146
4147         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4148                 tg3_power_down_phy(tp, do_low_power);
4149
4150         tg3_frob_aux_power(tp, true);
4151
4152         /* Workaround for unstable PLL clock */
4153         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4154             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4155              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4156                 u32 val = tr32(0x7d00);
4157
4158                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4159                 tw32(0x7d00, val);
4160                 if (!tg3_flag(tp, ENABLE_ASF)) {
4161                         int err;
4162
4163                         err = tg3_nvram_lock(tp);
4164                         tg3_halt_cpu(tp, RX_CPU_BASE);
4165                         if (!err)
4166                                 tg3_nvram_unlock(tp);
4167                 }
4168         }
4169
4170         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4171
4172         return 0;
4173 }
4174
/* Place the device into its low-power state: run the chip-specific
 * shutdown sequence, then arm PCI wakeup (only if Wake-on-LAN was
 * requested) and drop the device into D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* PME generation is enabled only when WOL is configured. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4182
4183 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4184 {
4185         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4186         case MII_TG3_AUX_STAT_10HALF:
4187                 *speed = SPEED_10;
4188                 *duplex = DUPLEX_HALF;
4189                 break;
4190
4191         case MII_TG3_AUX_STAT_10FULL:
4192                 *speed = SPEED_10;
4193                 *duplex = DUPLEX_FULL;
4194                 break;
4195
4196         case MII_TG3_AUX_STAT_100HALF:
4197                 *speed = SPEED_100;
4198                 *duplex = DUPLEX_HALF;
4199                 break;
4200
4201         case MII_TG3_AUX_STAT_100FULL:
4202                 *speed = SPEED_100;
4203                 *duplex = DUPLEX_FULL;
4204                 break;
4205
4206         case MII_TG3_AUX_STAT_1000HALF:
4207                 *speed = SPEED_1000;
4208                 *duplex = DUPLEX_HALF;
4209                 break;
4210
4211         case MII_TG3_AUX_STAT_1000FULL:
4212                 *speed = SPEED_1000;
4213                 *duplex = DUPLEX_FULL;
4214                 break;
4215
4216         default:
4217                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4218                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4219                                  SPEED_10;
4220                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4221                                   DUPLEX_HALF;
4222                         break;
4223                 }
4224                 *speed = SPEED_UNKNOWN;
4225                 *duplex = DUPLEX_UNKNOWN;
4226                 break;
4227         }
4228 }
4229
/* Program the PHY autonegotiation advertisement registers.
 *
 * @advertise: ethtool ADVERTISED_* mask to put on the wire
 * @flowctrl:  FLOW_CTRL_TX/RX bits, folded into the pause advertisement
 *
 * On EEE-capable PHYs the EEE advertisement (and per-ASIC DSP
 * workarounds) are programmed as well.  Returns 0, or the error from
 * the first failed PHY access.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 are forced to 1000BASE-T master mode. */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reprogramming the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Per-ASIC DSP fixups; note the deliberate fallthrough
		 * from the 5717/57765/57766/5719 group into 5720/5762.
		 */
		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Restore SMDSP access mode; don't mask an earlier error. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4303
/* Begin link bring-up on a copper PHY.
 *
 * When autonegotiation is enabled (or the PHY is in low power, where
 * the lowest usable speed is renegotiated for WOL) the advertisement
 * is programmed and autoneg restarted.  Otherwise the configured
 * speed/duplex is forced directly into BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low-power/WOL: advertise only 10Mb, plus
			 * 100Mb or 1Gb where the platform allows it.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Translate the forced speed into BMCR bits (SPEED_10
		 * and anything unrecognized map to 0, i.e. 10Mb).
		 */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY into loopback and poll (up to
			 * ~15ms) for the link to drop before writing
			 * the new mode.  BMSR is read twice per pass to
			 * refresh the latched link-status bit.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4397
/* Reconstruct tp->link_config from the PHY's current register state.
 *
 * Used when the existing link must be left untouched so the software
 * view matches whatever was already programmed into the PHY.  Returns
 * 0 on success, a PHY-access error, or -EIO when a forced-mode
 * combination cannot be represented (e.g. forced 10/100 on serdes).
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Serdes links don't support forced 10Mb. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			/* Serdes links don't support forced 100Mb. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg enabled: rebuild the advertising mask from the
	 * advertisement registers.
	 */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper gigabit abilities live in MII_CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X advertisement carries both the
			 * pause bits and the half/full abilities.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4494
4495 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4496 {
4497         int err;
4498
4499         /* Turn off tap power management. */
4500         /* Set Extended packet length bit */
4501         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4502
4503         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4504         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4505         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4506         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4507         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4508
4509         udelay(40);
4510
4511         return err;
4512 }
4513
4514 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4515 {
4516         u32 val;
4517         u32 tgtadv = 0;
4518         u32 advertising = tp->link_config.advertising;
4519
4520         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4521                 return true;
4522
4523         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4524                 return false;
4525
4526         val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4527
4528
4529         if (advertising & ADVERTISED_100baseT_Full)
4530                 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4531         if (advertising & ADVERTISED_1000baseT_Full)
4532                 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4533
4534         if (val != tgtadv)
4535                 return false;
4536
4537         return true;
4538 }
4539
/* Check that the PHY's autoneg advertisement registers match what
 * tp->link_config requests.  On success *lcladv holds the raw
 * MII_ADVERTISE value for the caller's flow-control resolution.
 * Returns true when the advertisement agrees, false on mismatch or
 * read failure.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits are only expected on full-duplex links. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* tg3_phy_autoneg_cfg() forces master mode on
			 * 5701 A0/B0, so expect those bits here too.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4583
4584 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4585 {
4586         u32 lpeth = 0;
4587
4588         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4589                 u32 val;
4590
4591                 if (tg3_readphy(tp, MII_STAT1000, &val))
4592                         return false;
4593
4594                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4595         }
4596
4597         if (tg3_readphy(tp, MII_LPA, rmtadv))
4598                 return false;
4599
4600         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4601         tp->link_config.rmt_adv = lpeth;
4602
4603         return true;
4604 }
4605
4606 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4607 {
4608         if (curr_link_up != tp->link_up) {
4609                 if (curr_link_up) {
4610                         netif_carrier_on(tp->dev);
4611                 } else {
4612                         netif_carrier_off(tp->dev);
4613                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4614                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4615                 }
4616
4617                 tg3_link_report(tp);
4618                 return true;
4619         }
4620
4621         return false;
4622 }
4623
/* Mask MAC event generation and acknowledge the latched sync/config/
 * MI-completion/link-state status bits so stale indications don't fire
 * later.  NOTE(review): these appear to be write-one-to-clear bits
 * (the same mask is written elsewhere to ack them) — confirm against
 * the register spec.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4635
/* Probe the copper PHY's link state, (re)start negotiation if needed,
 * and program the MAC to match (port mode, duplex, LEDs, polarity,
 * flow control).  @force_reset requests a PHY reset before probing.
 * Returns 0, or a negative error if the BCM5401 DSP re-init fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	/* Stop MI auto-polling while we drive MDIO manually. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR is read twice to refresh the latched link bit. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: re-run the 5401 DSP setup and give
			 * the link up to ~10ms to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 sometimes needs a full reset when
			 * coming back from gigabit with no link.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	/* Assume link-down until proven otherwise. */
	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Set bit 10 of the MISCTEST shadow if it isn't
		 * already, then go straight to renegotiation.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link; double-read refreshes the
	 * latched link-status bit.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR until it reads as something sane (neither
		 * 0 nor all-ones in the low 15 bits).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			/* Link counts as up only if the PHY's actual
			 * advertisement matches what we requested.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset)
				tg3_phy_reset(tp);
		} else {
			/* Forced mode: link is good only at the exact
			 * configured speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* MDI-X status lives in a different register on
			 * FET PHYs.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		/* The link may have come up while renegotiating; also
		 * internal loopback always counts as link-up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	/* Select the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on a high-speed PCI(X) bus: ack status
	 * changes and signal the firmware via its mailbox.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		/* CLKREQ must be off at 10/100 on affected chips. */
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4946
/* Per-port state for the software 1000BASE-X (802.3z clause 37 style)
 * autonegotiation state machine in tg3_fiber_aneg_smachine().  Used
 * when the MAC cannot run the negotiation in hardware.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* management-register style status bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
/* MR_LP_ADV_* bits mirror what the link partner advertised. */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation),
	 * compared against ANEG_STATE_SETTLE_TIME below.
	 */
	unsigned long link_time, cur_time;

	/* Last raw config word seen and how many consecutive times it
	 * repeated; two identical sightings set ability_match.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Raw tx/rx config words exchanged on the wire (ANEG_CFG_*). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
5010
/* Advance the software 1000BASE-X autonegotiation state machine one
 * step.  Called roughly once per microsecond from fiber_autoneg(),
 * with all inter-call state held in @ap.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while a settle
 * timer is running, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear any stale match/timing state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word (if any) and update the
	 * ability/ack/idle match flags the states below consume.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed: restart the match count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word twice in a row => ability match. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Reset all match/timing state and restart. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until the partner sends a stable non-zero word. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner ACKed; make sure the word it is acking
			 * matches what we have been receiving, else the
			 * whole negotiation restarts.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Translate the partner's config word into MR_LP_ADV_*
		 * flags for the caller.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): bit 0x0008 of the rx config word is not
		 * named by the ANEG_CFG_* macros here; it appears to be
		 * the partner's toggle bit -- confirm against the 802.3z
		 * clause 37 base-page layout.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner dropped to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only succeed if neither side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5262
5263 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5264 {
5265         int res = 0;
5266         struct tg3_fiber_aneginfo aninfo;
5267         int status = ANEG_FAILED;
5268         unsigned int tick;
5269         u32 tmp;
5270
5271         tw32_f(MAC_TX_AUTO_NEG, 0);
5272
5273         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5274         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5275         udelay(40);
5276
5277         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5278         udelay(40);
5279
5280         memset(&aninfo, 0, sizeof(aninfo));
5281         aninfo.flags |= MR_AN_ENABLE;
5282         aninfo.state = ANEG_STATE_UNKNOWN;
5283         aninfo.cur_time = 0;
5284         tick = 0;
5285         while (++tick < 195000) {
5286                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5287                 if (status == ANEG_DONE || status == ANEG_FAILED)
5288                         break;
5289
5290                 udelay(1);
5291         }
5292
5293         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5294         tw32_f(MAC_MODE, tp->mac_mode);
5295         udelay(40);
5296
5297         *txflags = aninfo.txconfig;
5298         *rxflags = aninfo.flags;
5299
5300         if (status == ANEG_DONE &&
5301             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5302                              MR_LP_ADV_FULL_DUPLEX)))
5303                 res = 1;
5304
5305         return res;
5306 }
5307
/* One-time initialization sequence for the BCM8002 SERDES PHY.
 *
 * The register numbers and values below are raw BCM8002 programming
 * magic; the inline comments reflect the original author's notes and
 * are not independently verifiable from this file.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5357
/* Bring up the fiber link when autonegotiation is handled by the
 * SG_DIG hardware block (HW_AUTONEG flag set).
 *
 * @mac_status is a MAC_STATUS snapshot taken by the caller.  Returns
 * true when the link is up.
 *
 * NOTE(review): the serdes_cfg OR-values (0xc010000 / 0x4010000 /
 * 0xc011000) are undocumented chip magic, presumably selecting
 * per-port pre-emphasis/regulator settings -- confirm against
 * Broadcom documentation before touching them.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* Every chip except 5704 A0/A1 needs the MAC_SERDES_CFG
	 * workaround writes below; on dual-MAC parts pick the value
	 * for whichever port this function instance controls.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down hardware autoneg if it was on,
		 * and declare link up on PCS sync alone.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* SG_DIG_CTRL is not programmed the way we want it.
		 * If we recently got link via parallel detection, hold
		 * off re-programming (and thus dropping link) while the
		 * serdes counter runs down.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while programming the desired mode. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Hardware autoneg finished: derive pause
			 * settings from what both sides advertised.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: fall back to
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5502
5503 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5504 {
5505         bool current_link_up = false;
5506
5507         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5508                 goto out;
5509
5510         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5511                 u32 txflags, rxflags;
5512                 int i;
5513
5514                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5515                         u32 local_adv = 0, remote_adv = 0;
5516
5517                         if (txflags & ANEG_CFG_PS1)
5518                                 local_adv |= ADVERTISE_1000XPAUSE;
5519                         if (txflags & ANEG_CFG_PS2)
5520                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5521
5522                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5523                                 remote_adv |= LPA_1000XPAUSE;
5524                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5525                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5526
5527                         tp->link_config.rmt_adv =
5528                                            mii_adv_to_ethtool_adv_x(remote_adv);
5529
5530                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5531
5532                         current_link_up = true;
5533                 }
5534                 for (i = 0; i < 30; i++) {
5535                         udelay(20);
5536                         tw32_f(MAC_STATUS,
5537                                (MAC_STATUS_SYNC_CHANGED |
5538                                 MAC_STATUS_CFG_CHANGED));
5539                         udelay(40);
5540                         if ((tr32(MAC_STATUS) &
5541                              (MAC_STATUS_SYNC_CHANGED |
5542                               MAC_STATUS_CFG_CHANGED)) == 0)
5543                                 break;
5544                 }
5545
5546                 mac_status = tr32(MAC_STATUS);
5547                 if (!current_link_up &&
5548                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5549                     !(mac_status & MAC_STATUS_RCVD_CFG))
5550                         current_link_up = true;
5551         } else {
5552                 tg3_setup_flow_control(tp, 0, 0);
5553
5554                 /* Forcing 1000FD link up. */
5555                 current_link_up = true;
5556
5557                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5558                 udelay(40);
5559
5560                 tw32_f(MAC_MODE, tp->mac_mode);
5561                 udelay(40);
5562         }
5563
5564 out:
5565         return current_link_up;
5566 }
5567
/* Top-level link setup for TBI/fiber ports.
 *
 * Dispatches to hardware (SG_DIG) or software autonegotiation, then
 * settles MAC status, programs the link LEDs and reports any link
 * change.  @force_reset is unused on this path.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report a
	 * change at the end even when the link state itself did not
	 * transition.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path for software autoneg: if we are already up and the
	 * PCS is synced with signal detect and no pending config, just
	 * ack the latched change bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the pending link-change bit in the status block so the
	 * interrupt path does not re-run link setup immediately.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		/* Kick a new autoneg round by pulsing SEND_CONFIGS. */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber link is always 1000/full when up; drive the LEDs to
	 * match the final state.
	 */
	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when any
	 * of pause/speed/duplex did.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5670
5671 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5672 {
5673         int err = 0;
5674         u32 bmsr, bmcr;
5675         u16 current_speed = SPEED_UNKNOWN;
5676         u8 current_duplex = DUPLEX_UNKNOWN;
5677         bool current_link_up = false;
5678         u32 local_adv, remote_adv, sgsr;
5679
5680         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5681              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5682              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5683              (sgsr & SERDES_TG3_SGMII_MODE)) {
5684
5685                 if (force_reset)
5686                         tg3_phy_reset(tp);
5687
5688                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5689
5690                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5691                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5692                 } else {
5693                         current_link_up = true;
5694                         if (sgsr & SERDES_TG3_SPEED_1000) {
5695                                 current_speed = SPEED_1000;
5696                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5697                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5698                                 current_speed = SPEED_100;
5699                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5700                         } else {
5701                                 current_speed = SPEED_10;
5702                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5703                         }
5704
5705                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5706                                 current_duplex = DUPLEX_FULL;
5707                         else
5708                                 current_duplex = DUPLEX_HALF;
5709                 }
5710
5711                 tw32_f(MAC_MODE, tp->mac_mode);
5712                 udelay(40);
5713
5714                 tg3_clear_mac_status(tp);
5715
5716                 goto fiber_setup_done;
5717         }
5718
5719         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5720         tw32_f(MAC_MODE, tp->mac_mode);
5721         udelay(40);
5722
5723         tg3_clear_mac_status(tp);
5724
5725         if (force_reset)
5726                 tg3_phy_reset(tp);
5727
5728         tp->link_config.rmt_adv = 0;
5729
5730         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5731         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5732         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5733                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5734                         bmsr |= BMSR_LSTATUS;
5735                 else
5736                         bmsr &= ~BMSR_LSTATUS;
5737         }
5738
5739         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5740
5741         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5742             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5743                 /* do nothing, just check for link up at the end */
5744         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5745                 u32 adv, newadv;
5746
5747                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5748                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5749                                  ADVERTISE_1000XPAUSE |
5750                                  ADVERTISE_1000XPSE_ASYM |
5751                                  ADVERTISE_SLCT);
5752
5753                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5754                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5755
5756                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5757                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5758                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5759                         tg3_writephy(tp, MII_BMCR, bmcr);
5760
5761                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5762                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5763                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5764
5765                         return err;
5766                 }
5767         } else {
5768                 u32 new_bmcr;
5769
5770                 bmcr &= ~BMCR_SPEED1000;
5771                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5772
5773                 if (tp->link_config.duplex == DUPLEX_FULL)
5774                         new_bmcr |= BMCR_FULLDPLX;
5775
5776                 if (new_bmcr != bmcr) {
5777                         /* BMCR_SPEED1000 is a reserved bit that needs
5778                          * to be set on write.
5779                          */
5780                         new_bmcr |= BMCR_SPEED1000;
5781
5782                         /* Force a linkdown */
5783                         if (tp->link_up) {
5784                                 u32 adv;
5785
5786                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5787                                 adv &= ~(ADVERTISE_1000XFULL |
5788                                          ADVERTISE_1000XHALF |
5789                                          ADVERTISE_SLCT);
5790                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5791                                 tg3_writephy(tp, MII_BMCR, bmcr |
5792                                                            BMCR_ANRESTART |
5793                                                            BMCR_ANENABLE);
5794                                 udelay(10);
5795                                 tg3_carrier_off(tp);
5796                         }
5797                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5798                         bmcr = new_bmcr;
5799                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5800                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5801                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5802                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5803                                         bmsr |= BMSR_LSTATUS;
5804                                 else
5805                                         bmsr &= ~BMSR_LSTATUS;
5806                         }
5807                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5808                 }
5809         }
5810
5811         if (bmsr & BMSR_LSTATUS) {
5812                 current_speed = SPEED_1000;
5813                 current_link_up = true;
5814                 if (bmcr & BMCR_FULLDPLX)
5815                         current_duplex = DUPLEX_FULL;
5816                 else
5817                         current_duplex = DUPLEX_HALF;
5818
5819                 local_adv = 0;
5820                 remote_adv = 0;
5821
5822                 if (bmcr & BMCR_ANENABLE) {
5823                         u32 common;
5824
5825                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5826                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5827                         common = local_adv & remote_adv;
5828                         if (common & (ADVERTISE_1000XHALF |
5829                                       ADVERTISE_1000XFULL)) {
5830                                 if (common & ADVERTISE_1000XFULL)
5831                                         current_duplex = DUPLEX_FULL;
5832                                 else
5833                                         current_duplex = DUPLEX_HALF;
5834
5835                                 tp->link_config.rmt_adv =
5836                                            mii_adv_to_ethtool_adv_x(remote_adv);
5837                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5838                                 /* Link is up via parallel detect */
5839                         } else {
5840                                 current_link_up = false;
5841                         }
5842                 }
5843         }
5844
5845 fiber_setup_done:
5846         if (current_link_up && current_duplex == DUPLEX_FULL)
5847                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5848
5849         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5850         if (tp->link_config.active_duplex == DUPLEX_HALF)
5851                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5852
5853         tw32_f(MAC_MODE, tp->mac_mode);
5854         udelay(40);
5855
5856         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5857
5858         tp->link_config.active_speed = current_speed;
5859         tp->link_config.active_duplex = current_duplex;
5860
5861         tg3_test_and_report_link_chg(tp, current_link_up);
5862         return err;
5863 }
5864
/* Periodic poll that manages "parallel detect" on serdes links: if
 * autoneg has not completed (no config code words seen) but signal is
 * detected, force the link up at 1000/full; conversely, if the partner
 * later starts sending config code words, re-enable autoneg.
 * TG3_PHYFLG_PARALLEL_DETECT tracks which of the two states we are in.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read — NOTE(review): presumably the first
			 * read clears a latched value; confirm against the
			 * PHY expansion-register documentation.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5924
/* Top-level link setup: dispatch to the serdes, MII-serdes or copper
 * PHY handler based on phy_flags, then reprogram the MAC parameters
 * that depend on the negotiated link (clock prescaler on 5784 AX,
 * TX slot time / IPG, statistics coalescing, ASPM L1 threshold).
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve the jumbo-frame and countdown fields these
		 * chips keep in MAC_TX_LENGTHS.
		 */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Longer slot time for half-duplex gigabit, standard otherwise. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Raise the L1 entry threshold while link is up so ASPM
		 * does not interfere; restore the saved threshold when
		 * link is down.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5990
/* tp->lock must be held */
/* Return the 64-bit EAV reference clock as LSB | MSB<<32.
 * NOTE(review): LSB is read before MSB; this assumes the hardware
 * keeps the two halves consistent across the pair of reads (e.g. the
 * LSB read latches the MSB) — confirm against the register spec.
 */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
5997
/* tp->lock must be held */
/* Load a new 64-bit value into the EAV reference clock.  The clock is
 * stopped first and resumed last, presumably so the counter cannot be
 * sampled while only one half has been written; the final write is
 * flushed (tw32_f) before returning.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
6006
6007 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6008 static inline void tg3_full_unlock(struct tg3 *tp);
6009 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6010 {
6011         struct tg3 *tp = netdev_priv(dev);
6012
6013         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6014                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6015                                 SOF_TIMESTAMPING_SOFTWARE;
6016
6017         if (tg3_flag(tp, PTP_CAPABLE)) {
6018                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6019                                         SOF_TIMESTAMPING_RX_HARDWARE |
6020                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6021         }
6022
6023         if (tp->ptp_clock)
6024                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6025         else
6026                 info->phc_index = -1;
6027
6028         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6029
6030         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6031                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6032                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6033                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6034         return 0;
6035 }
6036
6037 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6038 {
6039         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6040         bool neg_adj = false;
6041         u32 correction = 0;
6042
6043         if (ppb < 0) {
6044                 neg_adj = true;
6045                 ppb = -ppb;
6046         }
6047
6048         /* Frequency adjustment is performed using hardware with a 24 bit
6049          * accumulator and a programmable correction value. On each clk, the
6050          * correction value gets added to the accumulator and when it
6051          * overflows, the time counter is incremented/decremented.
6052          *
6053          * So conversion from ppb to correction value is
6054          *              ppb * (1 << 24) / 1000000000
6055          */
6056         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6057                      TG3_EAV_REF_CLK_CORRECT_MASK;
6058
6059         tg3_full_lock(tp, 0);
6060
6061         if (correction)
6062                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6063                      TG3_EAV_REF_CLK_CORRECT_EN |
6064                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6065         else
6066                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6067
6068         tg3_full_unlock(tp);
6069
6070         return 0;
6071 }
6072
/* PTP ->adjtime callback: shift the clock by 'delta' ns.  The offset is
 * kept purely in software (ptp_adjust) and added to every hardware
 * clock read; no register access is required here.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6083
6084 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6085 {
6086         u64 ns;
6087         u32 remainder;
6088         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6089
6090         tg3_full_lock(tp, 0);
6091         ns = tg3_refclk_read(tp);
6092         ns += tp->ptp_adjust;
6093         tg3_full_unlock(tp);
6094
6095         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6096         ts->tv_nsec = remainder;
6097
6098         return 0;
6099 }
6100
6101 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6102                            const struct timespec *ts)
6103 {
6104         u64 ns;
6105         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6106
6107         ns = timespec_to_ns(ts);
6108
6109         tg3_full_lock(tp, 0);
6110         tg3_refclk_write(tp, ns);
6111         tp->ptp_adjust = 0;
6112         tg3_full_unlock(tp);
6113
6114         return 0;
6115 }
6116
/* PTP ->enable callback: no ancillary features (alarms, external
 * timestamps, periodic outputs, PPS) are implemented, so every
 * request is rejected.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
6122
/* Capability/operations table handed to the PTP core (copied into
 * tp->ptp_info by tg3_ptp_init).  max_adj bounds the ppb argument the
 * core will pass to tg3_ptp_adjfreq; the zeroed n_*/pps fields match
 * tg3_ptp_enable returning -EOPNOTSUPP for all ancillary requests.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6137
6138 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6139                                      struct skb_shared_hwtstamps *timestamp)
6140 {
6141         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6142         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6143                                            tp->ptp_adjust);
6144 }
6145
6146 /* tp->lock must be held */
6147 static void tg3_ptp_init(struct tg3 *tp)
6148 {
6149         if (!tg3_flag(tp, PTP_CAPABLE))
6150                 return;
6151
6152         /* Initialize the hardware clock to the system time. */
6153         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6154         tp->ptp_adjust = 0;
6155         tp->ptp_info = tg3_ptp_caps;
6156 }
6157
6158 /* tp->lock must be held */
6159 static void tg3_ptp_resume(struct tg3 *tp)
6160 {
6161         if (!tg3_flag(tp, PTP_CAPABLE))
6162                 return;
6163
6164         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6165         tp->ptp_adjust = 0;
6166 }
6167
6168 static void tg3_ptp_fini(struct tg3 *tp)
6169 {
6170         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6171                 return;
6172
6173         ptp_clock_unregister(tp->ptp_clock);
6174         tp->ptp_clock = NULL;
6175         tp->ptp_adjust = 0;
6176 }
6177
/* Return tp->irq_sync.  NOTE(review): nonzero appears to mean an
 * interrupt synchronization/disable is in progress — confirm the exact
 * semantics at the call sites that gate handling on this value.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6182
6183 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6184 {
6185         int i;
6186
6187         dst = (u32 *)((u8 *)dst + off);
6188         for (i = 0; i < len; i += sizeof(u32))
6189                 *dst++ = tr32(off + i);
6190 }
6191
/* Fill 'regs' with the register ranges of interest on non-PCIe parts.
 * Each tg3_rd32_loop call is (base offset, byte length); the buffer is
 * indexed by absolute register offset, so untouched ranges stay zero
 * and are skipped by the printer in tg3_dump_state.  Ranges that only
 * exist on some configurations (MSI-X vectors, TX CPU, NVRAM) are
 * read conditionally.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6241
/* Dump diagnostic state to the kernel log: a register dump (full
 * low-range on PCIe parts, curated ranges otherwise) followed by the
 * per-vector hardware status block and NAPI/ring indices.  Uses
 * GFP_ATOMIC since it may be called from a non-sleeping context;
 * silently returns if the buffer cannot be allocated.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6297
6298 /* This is called whenever we suspect that the system chipset is re-
6299  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6300  * is bogus tx completions. We try to recover by setting the
6301  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6302  * in the workqueue.
6303  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense if the reordering workaround is not
	 * already in effect and mailbox writes are not already indirect.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Mark recovery pending; the chip reset itself is performed
	 * later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
6319
6320 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6321 {
6322         /* Tell compiler to fetch tx indices from memory. */
6323         barrier();
6324         return tnapi->tx_pending -
6325                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6326 }
6327
6328 /* Tigon3 never reports partial packet sends.  So we do not
6329  * need special logic to handle SKBs that have not had all
6330  * of their frags sent yet, like SunGEM does.
6331  */
/* Reclaim completed TX descriptors for one NAPI vector: walk the ring
 * from the software consumer index up to the hardware consumer index,
 * unmapping and freeing each skb (head fragment plus page fragments),
 * emitting TX hardware timestamps where requested, and finally waking
 * the queue if it was stopped and enough space is now free.  Any ring
 * inconsistency (NULL skb, frag landing on hw_idx) triggers
 * tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no TX queue, so queue numbering is
	 * shifted down by one relative to the napi index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot must own an skb; otherwise the ring
		 * state is corrupt.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra descriptors the head was split across. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A frag slot owning an skb, or running into the
			 * hardware index mid-packet, indicates corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			/* Skip descriptors this frag was split across. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions for byte-queue-limit accounting. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check the stopped condition under the tx lock to close the
	 * race with the xmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6431
6432 static void tg3_frag_free(bool is_frag, void *data)
6433 {
6434         if (is_frag)
6435                 put_page(virt_to_head_page(data));
6436         else
6437                 kfree(data);
6438 }
6439
6440 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6441 {
6442         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6443                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6444
6445         if (!ri->data)
6446                 return;
6447
6448         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6449                          map_sz, PCI_DMA_FROMDEVICE);
6450         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6451         ri->data = NULL;
6452 }
6453
6454
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the descriptor, shadow ring entry and DMA mapping size
	 * for the producer ring named by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Buffers that fit in a page come from the page-frag allocator
	 * (usable with build_skb()); larger ones fall back to kmalloc,
	 * signalled to the caller via *frag_size == 0.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: publish the new buffer and its DMA address. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6530
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Used on the drop/error path: instead of allocating a replacement
 * buffer, the buffer at spr[src_idx] is re-posted at dpr[dest_idx]
 * so the chip never sees a descriptor without a backing buffer.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source buffers always come from the default (napi[0]) ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6580
6581 /* The RX ring scheme is composed of multiple rings which post fresh
6582  * buffers to the chip, and one special ring the chip uses to report
6583  * status back to the host.
6584  *
6585  * The special ring reports the status of received packets to the
6586  * host.  The chip does not write into the original descriptor the
6587  * RX buffer was obtained from.  The chip simply takes the original
6588  * descriptor as provided by the host, updates the status and length
6589  * field, then writes this into the next status ring entry.
6590  *
6591  * Each ring the host uses to post buffers to the chip is described
6592  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6593  * it is first placed into the on-chip ram.  When the packet's length
6594  * is known, it walks down the TG3_BDINFO entries to select the ring.
6595  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6596  * which is within the range of the new packet's length is chosen.
6597  *
6598  * The "separate ring for rx status" scheme may sound queer, but it makes
6599  * sense from a cache coherency perspective.  If only the host writes
6600  * to the buffer post rings, and only the chip writes to the rx status
6601  * rings, then cache lines never move beyond shared-modified state.
6602  * If both the host and chip were to write into the same ring, cache line
6603  * eviction could occur since both entities want it in an exclusive state.
6604  */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring (and
		 * which slot within it) this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Read the hardware receive timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large packet: give the existing buffer to the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the original buffer back onto the ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (the tag accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the standard producer mailbox so the
		 * chip does not run out of buffers during a long burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the hardware refill; wake it so
		 * it can redistribute the buffers just consumed.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6810
/* Handle a link-change event reported through the status block.  Only
 * active when neither the link-change register nor serdes polling is
 * used to track link state.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before acting on it. */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib manages the PHY; just acknowledge
				 * the MAC status bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
6834
/* Transfer recycled rx buffers from source producer ring set @spr (a
 * per-vector shadow set) to destination set @dpr (the hardware-visible
 * napi[0] rings).  Both the standard and jumbo rings are processed.
 * Returns 0 on success or -ENOSPC if a destination slot was still
 * occupied; the caller then forces a coalescing interrupt to retry.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available, accounting for
		 * ring wraparound.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding data. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6960
/* Core NAPI work routine shared by tg3_poll() and tg3_poll_msix().
 * Runs TX completion first, then RX within the remaining budget.  For
 * the RSS master vector (napi[1]) it also moves recycled buffers from
 * all per-vector rings back to the hardware-visible napi[0] rings and
 * kicks the producer mailboxes.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Sync BD updates before touching the mailboxes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A destination ring was full; force a coalescing
		 * interrupt so the transfer is retried soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7011
7012 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7013 {
7014         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7015                 schedule_work(&tp->reset_task);
7016 }
7017
/* Cancel a queued reset task, waiting for a running one to finish,
 * then clear the pending/recovery flags so a new reset may be
 * scheduled later.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7024
/* NAPI poll handler for MSI-X vectors other than vector 0.  These
 * vectors use tagged status blocks and do not handle link events.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7084
7085 static void tg3_process_error(struct tg3 *tp)
7086 {
7087         u32 val;
7088         bool real_error = false;
7089
7090         if (tg3_flag(tp, ERROR_PROCESSED))
7091                 return;
7092
7093         /* Check Flow Attention register */
7094         val = tr32(HOSTCC_FLOW_ATTN);
7095         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7096                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7097                 real_error = true;
7098         }
7099
7100         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7101                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7102                 real_error = true;
7103         }
7104
7105         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7106                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7107                 real_error = true;
7108         }
7109
7110         if (!real_error)
7111                 return;
7112
7113         tg3_dump_state(tp);
7114
7115         tg3_flag_set(tp, ERROR_PROCESSED);
7116         tg3_reset_task_schedule(tp);
7117 }
7118
/* NAPI poll handler for vector 0.  In addition to the RX/TX work this
 * vector also handles chip error status and link-change events, and
 * supports both tagged and non-tagged status block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7166
7167 static void tg3_napi_disable(struct tg3 *tp)
7168 {
7169         int i;
7170
7171         for (i = tp->irq_cnt - 1; i >= 0; i--)
7172                 napi_disable(&tp->napi[i].napi);
7173 }
7174
7175 static void tg3_napi_enable(struct tg3 *tp)
7176 {
7177         int i;
7178
7179         for (i = 0; i < tp->irq_cnt; i++)
7180                 napi_enable(&tp->napi[i].napi);
7181 }
7182
7183 static void tg3_napi_init(struct tg3 *tp)
7184 {
7185         int i;
7186
7187         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7188         for (i = 1; i < tp->irq_cnt; i++)
7189                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7190 }
7191
7192 static void tg3_napi_fini(struct tg3 *tp)
7193 {
7194         int i;
7195
7196         for (i = 0; i < tp->irq_cnt; i++)
7197                 netif_napi_del(&tp->napi[i].napi);
7198 }
7199
/* Freeze the data path: stop NAPI polling, drop carrier, and disable
 * all transmit queues.  trans_start is refreshed first so the netdev
 * watchdog does not report a spurious tx timeout while stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7207
/* tp->lock must be held */
/* Restart the data path after reset/reconfiguration: resume PTP,
 * wake tx queues, restore carrier, re-enable NAPI and interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force the first poll to see pending work so that events which
	 * arrived while interrupts were disabled are not lost.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7226
/* Mark the driver as synchronizing and wait for all in-flight IRQ
 * handlers to complete.  The ISRs test tp->irq_sync (via
 * tg3_irq_sync()) and bail out once it is set; smp_mb() orders the
 * flag write ahead of the synchronize_irq() waits below.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nesting a quiesce is a caller bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7239
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7251
/* Release the lock taken by tg3_full_lock().  irq_sync is cleared
 * elsewhere (by the interrupt re-enable paths), not here.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7256
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next rx descriptor
	 * before the NAPI poll runs.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling if the driver is quiescing (tg3_irq_quiesce). */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7274
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7300
/* Legacy (INTx) interrupt handler for non-tagged status mode.
 * Returns IRQ_NONE if the interrupt was not ours (shared line).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7349
/* Legacy (INTx) interrupt handler for tagged status mode.  A change
 * in sblk->status_tag relative to the last tag we acknowledged tells
 * us the interrupt is ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7401
/* ISR for interrupt test */
/* Used only by the self-test path to verify that the chip can raise
 * an interrupt.  Claims the IRQ (and disables further interrupts) if
 * the status block was updated or INTA# is asserted.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7416
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the interrupt handler for each vector
 * directly so netconsole and friends can make progress with normal
 * interrupt delivery unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Nothing to do while the driver is quiescing interrupts. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7430
/* ndo_tx_timeout handler: log state (if tx_err messages are enabled)
 * and schedule the reset task to recover the device.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7442
7443 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7444 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7445 {
7446         u32 base = (u32) mapping & 0xffffffff;
7447
7448         return (base > 0xffffdcc0) && (base + len + 8 < base);
7449 }
7450
/* Test for DMA addresses > 40-bit */
/* Only relevant on 64-bit HIGHMEM builds for parts with the 40-bit
 * DMA bug; everywhere else the address can never exceed 40 bits and
 * the test compiles away to 0.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7463
/* Fill in one transmit buffer descriptor: split the 64-bit bus
 * address into hi/lo words and pack length/flags and mss/vlan into
 * their respective descriptor fields.
 */
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
7473
/* Queue one tx fragment, splitting it into multiple descriptors when
 * the hardware has a DMA segment-size limit (tp->dma_limit).  *entry
 * and *budget are advanced for every descriptor consumed.  Returns
 * true when a hardware DMA bug condition was hit (short-DMA, 4G
 * boundary, 40-bit overflow, or descriptor budget exhaustion) so the
 * caller can fall back to the copy workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Intermediate segments must not carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				/* Rebalance so the final segment is longer
				 * than 8 bytes.
				 */
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: undo the fragmented
				 * mark on the last queued segment and flag
				 * the hwbug fallback.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7533
/* Unmap the DMA mappings for one transmitted skb starting at ring
 * index 'entry' and clear the skb pointer.  'last' is the index of
 * the final page fragment to unmap (-1 for head-only).  Descriptors
 * marked 'fragmented' were produced by tg3_tx_frag_set() splitting a
 * single mapping, so they are skipped over without unmapping.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Head of the skb first. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Step past descriptors that belong to the same mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7571
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Copy the skb into a freshly allocated linear skb (with extra
 * headroom alignment on 5701) so the new mapping avoids the DMA bug
 * conditions, then queue it.  On success *pskb points at the new skb
 * and the original is freed.  Returns 0 on success, -1 on allocation
 * or mapping failure (the packet is dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Even the copy hit a bug condition: undo
				 * the mapping and drop.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7626
7627 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7628
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off, then transmit each
	 * resulting MTU-sized skb through the normal xmit path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7669
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, tx rings start at vector 1; vector 0 is rx-only. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO setup: fix up the IP/TCP headers and encode the
		 * header length / options into mss and base_flags in the
		 * format each hardware TSO generation expects.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a rare TSO bug: fall back to
		 * software GSO.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Queue the linear head, then each page fragment; any hwbug
	 * condition reported by tg3_tx_frag_set() forces the copy
	 * workaround below.
	 */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far and retry via the
		 * linearizing copy workaround.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Fragment i failed to map: unmap fragments 0..i-1 and the head. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7892
/* Enable or disable internal MAC loopback mode by rewriting the MAC
 * mode register.  Used by the ethtool loopback self-test and the
 * NETIF_F_LOOPBACK feature.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	/* Let the MAC settle after the mode change. */
	udelay(40);
}
7920
/* Configure PHY loopback (internal, or external when extlpbk is set)
 * at the requested speed and program the MAC to match.  Used by the
 * ethtool loopback self-test.  Returns 0 on success or -EIO if
 * external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Loopback is always full duplex; FET PHYs top out at 100Mb. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master mode for external loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		/* 5700 link-polarity quirks are PHY-model specific. */
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8013
8014 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8015 {
8016         struct tg3 *tp = netdev_priv(dev);
8017
8018         if (features & NETIF_F_LOOPBACK) {
8019                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8020                         return;
8021
8022                 spin_lock_bh(&tp->lock);
8023                 tg3_mac_loopback(tp, true);
8024                 netif_carrier_on(tp->dev);
8025                 spin_unlock_bh(&tp->lock);
8026                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8027         } else {
8028                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8029                         return;
8030
8031                 spin_lock_bh(&tp->lock);
8032                 tg3_mac_loopback(tp, false);
8033                 /* Force link status check */
8034                 tg3_setup_phy(tp, true);
8035                 spin_unlock_bh(&tp->lock);
8036                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8037         }
8038 }
8039
8040 static netdev_features_t tg3_fix_features(struct net_device *dev,
8041         netdev_features_t features)
8042 {
8043         struct tg3 *tp = netdev_priv(dev);
8044
8045         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8046                 features &= ~NETIF_F_ALL_TSO;
8047
8048         return features;
8049 }
8050
8051 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8052 {
8053         netdev_features_t changed = dev->features ^ features;
8054
8055         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8056                 tg3_set_loopback(dev, features);
8057
8058         return 0;
8059 }
8060
8061 static void tg3_rx_prodring_free(struct tg3 *tp,
8062                                  struct tg3_rx_prodring_set *tpr)
8063 {
8064         int i;
8065
8066         if (tpr != &tp->napi[0].prodring) {
8067                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8068                      i = (i + 1) & tp->rx_std_ring_mask)
8069                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8070                                         tp->rx_pkt_map_sz);
8071
8072                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8073                         for (i = tpr->rx_jmb_cons_idx;
8074                              i != tpr->rx_jmb_prod_idx;
8075                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8076                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8077                                                 TG3_RX_JMB_MAP_SZ);
8078                         }
8079                 }
8080
8081                 return;
8082         }
8083
8084         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8085                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8086                                 tp->rx_pkt_map_sz);
8087
8088         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8089                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8090                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8091                                         TG3_RX_JMB_MAP_SZ);
8092         }
8093 }
8094
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when not even a single data buffer
 * could be attached; on partial allocation the ring is shrunk instead
 * of failing.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Start both the standard and jumbo rings empty. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector shadow ring sets only need their bookkeeping
	 * arrays cleared; descriptors and data buffers are owned by
	 * the true hardware prodring on vector 0, handled below.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips carry jumbo-sized buffers on the standard
	 * ring when the MTU exceeds the standard frame size (they skip
	 * the dedicated jumbo ring entirely; see the check further
	 * down).
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		/* On partial allocation, shrink the ring to what was
		 * obtained rather than failing — unless not even one
		 * buffer could be attached.
		 */
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	/* No dedicated jumbo ring for non-jumbo-capable or 5780-class
	 * hardware (the latter handled it via the enlarged standard
	 * ring above).
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant stamping as the standard ring, with the
	 * jumbo flag and jumbo opaque tag.
	 */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8203
/* Free the host bookkeeping arrays and DMA descriptor rings of one
 * producer ring set.  Every pointer is cleared afterwards, so the
 * function is safe on a partially initialized set and idempotent.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	/* kfree(NULL) is a no-op, so no guards are needed here. */
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
8222
8223 static int tg3_rx_prodring_init(struct tg3 *tp,
8224                                 struct tg3_rx_prodring_set *tpr)
8225 {
8226         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8227                                       GFP_KERNEL);
8228         if (!tpr->rx_std_buffers)
8229                 return -ENOMEM;
8230
8231         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8232                                          TG3_RX_STD_RING_BYTES(tp),
8233                                          &tpr->rx_std_mapping,
8234                                          GFP_KERNEL);
8235         if (!tpr->rx_std)
8236                 goto err_out;
8237
8238         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8239                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8240                                               GFP_KERNEL);
8241                 if (!tpr->rx_jmb_buffers)
8242                         goto err_out;
8243
8244                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8245                                                  TG3_RX_JMB_RING_BYTES(tp),
8246                                                  &tpr->rx_jmb_mapping,
8247                                                  GFP_KERNEL);
8248                 if (!tpr->rx_jmb)
8249                         goto err_out;
8250         }
8251
8252         return 0;
8253
8254 err_out:
8255         tg3_rx_prodring_fini(tp, tpr);
8256         return -ENOMEM;
8257 }
8258
8259 /* Free up pending packets in all rx/tx rings.
8260  *
8261  * The chip has been shut down and the driver detached from
8262  * the networking, so no interrupts or new tx packets will
8263  * end up in the driver.  tp->{tx,}lock is not held and we are not
8264  * in an interrupt context and thus may sleep.
8265  */
8266 static void tg3_free_rings(struct tg3 *tp)
8267 {
8268         int i, j;
8269
8270         for (j = 0; j < tp->irq_cnt; j++) {
8271                 struct tg3_napi *tnapi = &tp->napi[j];
8272
8273                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8274
8275                 if (!tnapi->tx_buffers)
8276                         continue;
8277
8278                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8279                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8280
8281                         if (!skb)
8282                                 continue;
8283
8284                         tg3_tx_skb_unmap(tnapi, i,
8285                                          skb_shinfo(skb)->nr_frags - 1);
8286
8287                         dev_kfree_skb_any(skb);
8288                 }
8289                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8290         }
8291 }
8292
8293 /* Initialize tx/rx rings for packet processing.
8294  *
8295  * The chip has been shut down and the driver detached from
8296  * the networking, so no interrupts or new tx packets will
8297  * end up in the driver.  tp->{tx,}lock are held and thus
8298  * we may not sleep.
8299  */
8300 static int tg3_init_rings(struct tg3 *tp)
8301 {
8302         int i;
8303
8304         /* Free up all the SKBs. */
8305         tg3_free_rings(tp);
8306
8307         for (i = 0; i < tp->irq_cnt; i++) {
8308                 struct tg3_napi *tnapi = &tp->napi[i];
8309
8310                 tnapi->last_tag = 0;
8311                 tnapi->last_irq_tag = 0;
8312                 tnapi->hw_status->status = 0;
8313                 tnapi->hw_status->status_tag = 0;
8314                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8315
8316                 tnapi->tx_prod = 0;
8317                 tnapi->tx_cons = 0;
8318                 if (tnapi->tx_ring)
8319                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8320
8321                 tnapi->rx_rcb_ptr = 0;
8322                 if (tnapi->rx_rcb)
8323                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8324
8325                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8326                         tg3_free_rings(tp);
8327                         return -ENOMEM;
8328                 }
8329         }
8330
8331         return 0;
8332 }
8333
8334 static void tg3_mem_tx_release(struct tg3 *tp)
8335 {
8336         int i;
8337
8338         for (i = 0; i < tp->irq_max; i++) {
8339                 struct tg3_napi *tnapi = &tp->napi[i];
8340
8341                 if (tnapi->tx_ring) {
8342                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8343                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8344                         tnapi->tx_ring = NULL;
8345                 }
8346
8347                 kfree(tnapi->tx_buffers);
8348                 tnapi->tx_buffers = NULL;
8349         }
8350 }
8351
8352 static int tg3_mem_tx_acquire(struct tg3 *tp)
8353 {
8354         int i;
8355         struct tg3_napi *tnapi = &tp->napi[0];
8356
8357         /* If multivector TSS is enabled, vector 0 does not handle
8358          * tx interrupts.  Don't allocate any resources for it.
8359          */
8360         if (tg3_flag(tp, ENABLE_TSS))
8361                 tnapi++;
8362
8363         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8364                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8365                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8366                 if (!tnapi->tx_buffers)
8367                         goto err_out;
8368
8369                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8370                                                     TG3_TX_RING_BYTES,
8371                                                     &tnapi->tx_desc_mapping,
8372                                                     GFP_KERNEL);
8373                 if (!tnapi->tx_ring)
8374                         goto err_out;
8375         }
8376
8377         return 0;
8378
8379 err_out:
8380         tg3_mem_tx_release(tp);
8381         return -ENOMEM;
8382 }
8383
8384 static void tg3_mem_rx_release(struct tg3 *tp)
8385 {
8386         int i;
8387
8388         for (i = 0; i < tp->irq_max; i++) {
8389                 struct tg3_napi *tnapi = &tp->napi[i];
8390
8391                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8392
8393                 if (!tnapi->rx_rcb)
8394                         continue;
8395
8396                 dma_free_coherent(&tp->pdev->dev,
8397                                   TG3_RX_RCB_RING_BYTES(tp),
8398                                   tnapi->rx_rcb,
8399                                   tnapi->rx_rcb_mapping);
8400                 tnapi->rx_rcb = NULL;
8401         }
8402 }
8403
8404 static int tg3_mem_rx_acquire(struct tg3 *tp)
8405 {
8406         unsigned int i, limit;
8407
8408         limit = tp->rxq_cnt;
8409
8410         /* If RSS is enabled, we need a (dummy) producer ring
8411          * set on vector zero.  This is the true hw prodring.
8412          */
8413         if (tg3_flag(tp, ENABLE_RSS))
8414                 limit++;
8415
8416         for (i = 0; i < limit; i++) {
8417                 struct tg3_napi *tnapi = &tp->napi[i];
8418
8419                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8420                         goto err_out;
8421
8422                 /* If multivector RSS is enabled, vector 0
8423                  * does not handle rx or tx interrupts.
8424                  * Don't allocate any resources for it.
8425                  */
8426                 if (!i && tg3_flag(tp, ENABLE_RSS))
8427                         continue;
8428
8429                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8430                                                    TG3_RX_RCB_RING_BYTES(tp),
8431                                                    &tnapi->rx_rcb_mapping,
8432                                                    GFP_KERNEL | __GFP_ZERO);
8433                 if (!tnapi->rx_rcb)
8434                         goto err_out;
8435         }
8436
8437         return 0;
8438
8439 err_out:
8440         tg3_mem_rx_release(tp);
8441         return -ENOMEM;
8442 }
8443
8444 /*
8445  * Must not be invoked with interrupt sources disabled and
8446  * the hardware shutdown down.
8447  */
8448 static void tg3_free_consistent(struct tg3 *tp)
8449 {
8450         int i;
8451
8452         for (i = 0; i < tp->irq_cnt; i++) {
8453                 struct tg3_napi *tnapi = &tp->napi[i];
8454
8455                 if (tnapi->hw_status) {
8456                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8457                                           tnapi->hw_status,
8458                                           tnapi->status_mapping);
8459                         tnapi->hw_status = NULL;
8460                 }
8461         }
8462
8463         tg3_mem_rx_release(tp);
8464         tg3_mem_tx_release(tp);
8465
8466         if (tp->hw_stats) {
8467                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8468                                   tp->hw_stats, tp->stats_mapping);
8469                 tp->hw_stats = NULL;
8470         }
8471 }
8472
8473 /*
8474  * Must not be invoked with interrupt sources disabled and
8475  * the hardware shutdown down.  Can sleep.
8476  */
8477 static int tg3_alloc_consistent(struct tg3 *tp)
8478 {
8479         int i;
8480
8481         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8482                                           sizeof(struct tg3_hw_stats),
8483                                           &tp->stats_mapping,
8484                                           GFP_KERNEL | __GFP_ZERO);
8485         if (!tp->hw_stats)
8486                 goto err_out;
8487
8488         for (i = 0; i < tp->irq_cnt; i++) {
8489                 struct tg3_napi *tnapi = &tp->napi[i];
8490                 struct tg3_hw_status *sblk;
8491
8492                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8493                                                       TG3_HW_STATUS_SIZE,
8494                                                       &tnapi->status_mapping,
8495                                                       GFP_KERNEL | __GFP_ZERO);
8496                 if (!tnapi->hw_status)
8497                         goto err_out;
8498
8499                 sblk = tnapi->hw_status;
8500
8501                 if (tg3_flag(tp, ENABLE_RSS)) {
8502                         u16 *prodptr = NULL;
8503
8504                         /*
8505                          * When RSS is enabled, the status block format changes
8506                          * slightly.  The "rx_jumbo_consumer", "reserved",
8507                          * and "rx_mini_consumer" members get mapped to the
8508                          * other three rx return ring producer indexes.
8509                          */
8510                         switch (i) {
8511                         case 1:
8512                                 prodptr = &sblk->idx[0].rx_producer;
8513                                 break;
8514                         case 2:
8515                                 prodptr = &sblk->rx_jumbo_consumer;
8516                                 break;
8517                         case 3:
8518                                 prodptr = &sblk->reserved;
8519                                 break;
8520                         case 4:
8521                                 prodptr = &sblk->rx_mini_consumer;
8522                                 break;
8523                         }
8524                         tnapi->rx_rcb_prod_idx = prodptr;
8525                 } else {
8526                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8527                 }
8528         }
8529
8530         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8531                 goto err_out;
8532
8533         return 0;
8534
8535 err_out:
8536         tg3_free_consistent(tp);
8537         return -ENOMEM;
8538 }
8539
8540 #define MAX_WAIT_CNT 1000
8541
8542 /* To stop a block, clear the enable bit and poll till it
8543  * clears.  tp->lock is held.
8544  */
8545 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8546 {
8547         unsigned int i;
8548         u32 val;
8549
8550         if (tg3_flag(tp, 5705_PLUS)) {
8551                 switch (ofs) {
8552                 case RCVLSC_MODE:
8553                 case DMAC_MODE:
8554                 case MBFREE_MODE:
8555                 case BUFMGR_MODE:
8556                 case MEMARB_MODE:
8557                         /* We can't enable/disable these bits of the
8558                          * 5705/5750, just say success.
8559                          */
8560                         return 0;
8561
8562                 default:
8563                         break;
8564                 }
8565         }
8566
8567         val = tr32(ofs);
8568         val &= ~enable_bit;
8569         tw32_f(ofs, val);
8570
8571         for (i = 0; i < MAX_WAIT_CNT; i++) {
8572                 udelay(100);
8573                 val = tr32(ofs);
8574                 if ((val & enable_bit) == 0)
8575                         break;
8576         }
8577
8578         if (i == MAX_WAIT_CNT && !silent) {
8579                 dev_err(&tp->pdev->dev,
8580                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8581                         ofs, enable_bit);
8582                 return -ENODEV;
8583         }
8584
8585         return 0;
8586 }
8587
/* Quiesce the hardware: disable interrupts, stop the MAC receiver,
 * then shut down the rx-path, tx-path, host coalescing, DMA and
 * memory state machines one by one via tg3_stop_block().  Per-block
 * failures accumulate into the return value (0 on full success).
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames feed the rx
	 * blocks being shut down below.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path state machines. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path state machines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Clear MAC_MODE_TDE_ENABLE, then disable the MAC transmitter
	 * and poll up to MAX_WAIT_CNT * 100us for it to acknowledge.
	 */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		/* Unlike tg3_stop_block(), this timeout is reported
		 * even when @silent is set.
		 */
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register (all queues, then release). */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear every status block so stale indices are not consumed
	 * when the hardware is brought back up.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8651
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear the memory enable bit in
	 * PCI register 4 (see tg3_chip_reset()); the saved value is
	 * written back by tg3_restore_pci_state().
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8657
/* Restore PCI state after chip reset.
 *
 * Re-establishes config-space settings following a core-clock reset:
 * indirect access enables, PCISTATE bits, the saved PCI command word,
 * cacheline size / latency timer (conventional PCI only), PCI-X
 * relaxed ordering, and the MSI enable bit on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Command word saved by tg3_save_pci_state() before the reset. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8718
8719 /* tp->lock is held. */
8720 static int tg3_chip_reset(struct tg3 *tp)
8721 {
8722         u32 val;
8723         void (*write_op)(struct tg3 *, u32, u32);
8724         int i, err;
8725
8726         tg3_nvram_lock(tp);
8727
8728         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8729
8730         /* No matching tg3_nvram_unlock() after this because
8731          * chip reset below will undo the nvram lock.
8732          */
8733         tp->nvram_lock_cnt = 0;
8734
8735         /* GRC_MISC_CFG core clock reset will clear the memory
8736          * enable bit in PCI register 4 and the MSI enable bit
8737          * on some chips, so we save relevant registers here.
8738          */
8739         tg3_save_pci_state(tp);
8740
8741         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8742             tg3_flag(tp, 5755_PLUS))
8743                 tw32(GRC_FASTBOOT_PC, 0);
8744
8745         /*
8746          * We must avoid the readl() that normally takes place.
8747          * It locks machines, causes machine checks, and other
8748          * fun things.  So, temporarily disable the 5701
8749          * hardware workaround, while we do the reset.
8750          */
8751         write_op = tp->write32;
8752         if (write_op == tg3_write_flush_reg32)
8753                 tp->write32 = tg3_write32;
8754
8755         /* Prevent the irq handler from reading or writing PCI registers
8756          * during chip reset when the memory enable bit in the PCI command
8757          * register may be cleared.  The chip does not generate interrupt
8758          * at this time, but the irq handler may still be called due to irq
8759          * sharing or irqpoll.
8760          */
8761         tg3_flag_set(tp, CHIP_RESETTING);
8762         for (i = 0; i < tp->irq_cnt; i++) {
8763                 struct tg3_napi *tnapi = &tp->napi[i];
8764                 if (tnapi->hw_status) {
8765                         tnapi->hw_status->status = 0;
8766                         tnapi->hw_status->status_tag = 0;
8767                 }
8768                 tnapi->last_tag = 0;
8769                 tnapi->last_irq_tag = 0;
8770         }
8771         smp_mb();
8772
8773         for (i = 0; i < tp->irq_cnt; i++)
8774                 synchronize_irq(tp->napi[i].irq_vec);
8775
8776         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8777                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8778                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8779         }
8780
8781         /* do the reset */
8782         val = GRC_MISC_CFG_CORECLK_RESET;
8783
8784         if (tg3_flag(tp, PCI_EXPRESS)) {
8785                 /* Force PCIe 1.0a mode */
8786                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8787                     !tg3_flag(tp, 57765_PLUS) &&
8788                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8789                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8790                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8791
8792                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8793                         tw32(GRC_MISC_CFG, (1 << 29));
8794                         val |= (1 << 29);
8795                 }
8796         }
8797
8798         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8799                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8800                 tw32(GRC_VCPU_EXT_CTRL,
8801                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8802         }
8803
8804         /* Manage gphy power for all CPMU absent PCIe devices. */
8805         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8806                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8807
8808         tw32(GRC_MISC_CFG, val);
8809
8810         /* restore 5701 hardware bug workaround write method */
8811         tp->write32 = write_op;
8812
8813         /* Unfortunately, we have to delay before the PCI read back.
8814          * Some 575X chips even will not respond to a PCI cfg access
8815          * when the reset command is given to the chip.
8816          *
8817          * How do these hardware designers expect things to work
8818          * properly if the PCI write is posted for a long period
8819          * of time?  It is always necessary to have some method by
8820          * which a register read back can occur to push the write
8821          * out which does the reset.
8822          *
8823          * For most tg3 variants the trick below was working.
8824          * Ho hum...
8825          */
8826         udelay(120);
8827
8828         /* Flush PCI posted writes.  The normal MMIO registers
8829          * are inaccessible at this time so this is the only
8830          * way to make this reliably (actually, this is no longer
8831          * the case, see above).  I tried to use indirect
8832          * register read/write but this upset some 5701 variants.
8833          */
8834         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8835
8836         udelay(120);
8837
8838         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8839                 u16 val16;
8840
8841                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8842                         int j;
8843                         u32 cfg_val;
8844
8845                         /* Wait for link training to complete.  */
8846                         for (j = 0; j < 5000; j++)
8847                                 udelay(100);
8848
8849                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8850                         pci_write_config_dword(tp->pdev, 0xc4,
8851                                                cfg_val | (1 << 15));
8852                 }
8853
8854                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8855                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8856                 /*
8857                  * Older PCIe devices only support the 128 byte
8858                  * MPS setting.  Enforce the restriction.
8859                  */
8860                 if (!tg3_flag(tp, CPMU_PRESENT))
8861                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8862                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8863
8864                 /* Clear error status */
8865                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8866                                       PCI_EXP_DEVSTA_CED |
8867                                       PCI_EXP_DEVSTA_NFED |
8868                                       PCI_EXP_DEVSTA_FED |
8869                                       PCI_EXP_DEVSTA_URD);
8870         }
8871
8872         tg3_restore_pci_state(tp);
8873
8874         tg3_flag_clear(tp, CHIP_RESETTING);
8875         tg3_flag_clear(tp, ERROR_PROCESSED);
8876
8877         val = 0;
8878         if (tg3_flag(tp, 5780_CLASS))
8879                 val = tr32(MEMARB_MODE);
8880         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8881
8882         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8883                 tg3_stop_fw(tp);
8884                 tw32(0x5000, 0x400);
8885         }
8886
8887         if (tg3_flag(tp, IS_SSB_CORE)) {
8888                 /*
8889                  * BCM4785: In order to avoid repercussions from using
8890                  * potentially defective internal ROM, stop the Rx RISC CPU,
8891                  * which is not required.
8892                  */
8893                 tg3_stop_fw(tp);
8894                 tg3_halt_cpu(tp, RX_CPU_BASE);
8895         }
8896
8897         tw32(GRC_MODE, tp->grc_mode);
8898
8899         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8900                 val = tr32(0xc4);
8901
8902                 tw32(0xc4, val | (1 << 15));
8903         }
8904
8905         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8906             tg3_asic_rev(tp) == ASIC_REV_5705) {
8907                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8908                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8909                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8910                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8911         }
8912
8913         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8914                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8915                 val = tp->mac_mode;
8916         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8917                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8918                 val = tp->mac_mode;
8919         } else
8920                 val = 0;
8921
8922         tw32_f(MAC_MODE, val);
8923         udelay(40);
8924
8925         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8926
8927         err = tg3_poll_fw(tp);
8928         if (err)
8929                 return err;
8930
8931         tg3_mdio_start(tp);
8932
8933         if (tg3_flag(tp, PCI_EXPRESS) &&
8934             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8935             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8936             !tg3_flag(tp, 57765_PLUS)) {
8937                 val = tr32(0x7c00);
8938
8939                 tw32(0x7c00, val | (1 << 25));
8940         }
8941
8942         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8943                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8944                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8945         }
8946
8947         /* Reprobe ASF enable state.  */
8948         tg3_flag_clear(tp, ENABLE_ASF);
8949         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8950                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8951
8952         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8953         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8954         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8955                 u32 nic_cfg;
8956
8957                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8958                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8959                         tg3_flag_set(tp, ENABLE_ASF);
8960                         tp->last_event_jiffies = jiffies;
8961                         if (tg3_flag(tp, 5750_PLUS))
8962                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8963
8964                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8965                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8966                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8967                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8968                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8969                 }
8970         }
8971
8972         return 0;
8973 }
8974
8975 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8976 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8977
8978 /* tp->lock is held. */
8979 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8980 {
8981         int err;
8982
8983         tg3_stop_fw(tp);
8984
8985         tg3_write_sig_pre_reset(tp, kind);
8986
8987         tg3_abort_hw(tp, silent);
8988         err = tg3_chip_reset(tp);
8989
8990         __tg3_set_mac_addr(tp, false);
8991
8992         tg3_write_sig_legacy(tp, kind);
8993         tg3_write_sig_post_reset(tp, kind);
8994
8995         if (tp->hw_stats) {
8996                 /* Save the stats across chip resets... */
8997                 tg3_get_nstats(tp, &tp->net_stats_prev);
8998                 tg3_get_estats(tp, &tp->estats_prev);
8999
9000                 /* And make sure the next sample is new data */
9001                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9002         }
9003
9004         if (err)
9005                 return err;
9006
9007         return 0;
9008 }
9009
9010 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9011 {
9012         struct tg3 *tp = netdev_priv(dev);
9013         struct sockaddr *addr = p;
9014         int err = 0;
9015         bool skip_mac_1 = false;
9016
9017         if (!is_valid_ether_addr(addr->sa_data))
9018                 return -EADDRNOTAVAIL;
9019
9020         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9021
9022         if (!netif_running(dev))
9023                 return 0;
9024
9025         if (tg3_flag(tp, ENABLE_ASF)) {
9026                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9027
9028                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9029                 addr0_low = tr32(MAC_ADDR_0_LOW);
9030                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9031                 addr1_low = tr32(MAC_ADDR_1_LOW);
9032
9033                 /* Skip MAC addr 1 if ASF is using it. */
9034                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9035                     !(addr1_high == 0 && addr1_low == 0))
9036                         skip_mac_1 = true;
9037         }
9038         spin_lock_bh(&tp->lock);
9039         __tg3_set_mac_addr(tp, skip_mac_1);
9040         spin_unlock_bh(&tp->lock);
9041
9042         return err;
9043 }
9044
9045 /* tp->lock is held. */
9046 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9047                            dma_addr_t mapping, u32 maxlen_flags,
9048                            u32 nic_addr)
9049 {
9050         tg3_write_mem(tp,
9051                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9052                       ((u64) mapping >> 32));
9053         tg3_write_mem(tp,
9054                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9055                       ((u64) mapping & 0xffffffff));
9056         tg3_write_mem(tp,
9057                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9058                        maxlen_flags);
9059
9060         if (!tg3_flag(tp, 5705_PLUS))
9061                 tg3_write_mem(tp,
9062                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9063                               nic_addr);
9064 }
9065
9066
9067 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9068 {
9069         int i = 0;
9070
9071         if (!tg3_flag(tp, ENABLE_TSS)) {
9072                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9073                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9074                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9075         } else {
9076                 tw32(HOSTCC_TXCOL_TICKS, 0);
9077                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9078                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9079
9080                 for (; i < tp->txq_cnt; i++) {
9081                         u32 reg;
9082
9083                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9084                         tw32(reg, ec->tx_coalesce_usecs);
9085                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9086                         tw32(reg, ec->tx_max_coalesced_frames);
9087                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9088                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9089                 }
9090         }
9091
9092         for (; i < tp->irq_max - 1; i++) {
9093                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9094                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9095                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9096         }
9097 }
9098
9099 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9100 {
9101         int i = 0;
9102         u32 limit = tp->rxq_cnt;
9103
9104         if (!tg3_flag(tp, ENABLE_RSS)) {
9105                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9106                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9107                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9108                 limit--;
9109         } else {
9110                 tw32(HOSTCC_RXCOL_TICKS, 0);
9111                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9112                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9113         }
9114
9115         for (; i < limit; i++) {
9116                 u32 reg;
9117
9118                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9119                 tw32(reg, ec->rx_coalesce_usecs);
9120                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9121                 tw32(reg, ec->rx_max_coalesced_frames);
9122                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9123                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9124         }
9125
9126         for (; i < tp->irq_max - 1; i++) {
9127                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9128                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9129                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9130         }
9131 }
9132
9133 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9134 {
9135         tg3_coal_tx_init(tp, ec);
9136         tg3_coal_rx_init(tp, ec);
9137
9138         if (!tg3_flag(tp, 5705_PLUS)) {
9139                 u32 val = ec->stats_block_coalesce_usecs;
9140
9141                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9142                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9143
9144                 if (!tp->link_up)
9145                         val = 0;
9146
9147                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9148         }
9149 }
9150
/* tp->lock is held. */
/* Reset all ring state to a pristine post-reset configuration:
 * disable every unused send/receive-return ring control block in NIC
 * SRAM, re-arm the interrupt mailboxes, zero the producer/consumer
 * mailboxes, clear the host status blocks, and re-program the DMA
 * addresses and BD info blocks for vector 0 and each extra MSI-X
 * vector.  The statement order follows the hardware programming
 * sequence and must not be rearranged.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	/* The number of send ring control blocks present in NIC SRAM
	 * varies by chip family; compute the end of the region.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	/* Writing 1 to the interrupt mailbox masks the vector. */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* TX producer mailboxes exist per-vector only
			 * when TSS is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single TX producer mbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-enable ring 0's send and receive-return BD info blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Program the remaining MSI-X vectors; their status-block address
	 * register pairs are 8 bytes apart starting at STATBLCK_RING1.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
9280
/* Program the RX buffer-descriptor replenish thresholds for the
 * standard and (where present) jumbo rings.  The threshold written is
 * the smaller of half the chip's on-chip BD cache size and 1/8 of the
 * configured ring size, clamped to at least 1; for the standard ring
 * it is additionally capped by tp->rx_std_max_post.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	/* 57765+ chips also take a low-water mark for replenishment. */
	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* No jumbo-ring thresholds on chips without jumbo support
	 * (5780-class handles jumbo frames differently).
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9319
9320 static inline u32 calc_crc(unsigned char *buf, int len)
9321 {
9322         u32 reg;
9323         u32 tmp;
9324         int j, k;
9325
9326         reg = 0xffffffff;
9327
9328         for (j = 0; j < len; j++) {
9329                 reg ^= buf[j];
9330
9331                 for (k = 0; k < 8; k++) {
9332                         tmp = reg & 0x01;
9333
9334                         reg >>= 1;
9335
9336                         if (tmp)
9337                                 reg ^= 0xedb88320;
9338                 }
9339         }
9340
9341         return ~reg;
9342 }
9343
9344 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9345 {
9346         /* accept or reject all multicast frames */
9347         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9348         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9349         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9350         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9351 }
9352
/* Program the MAC RX mode (promiscuous / VLAN-tag handling) and the
 * multicast hash filter from dev->flags and the device multicast list.
 * NOTE(review): appears to expect serialization by the caller (other
 * register updates here are done under tp->lock) — confirm at call sites.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into one of 128 filter bits: the low
		 * 7 bits of the complemented CRC give a 2-bit register
		 * index and a 5-bit bit position.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE if the computed mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9406
9407 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9408 {
9409         int i;
9410
9411         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9412                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9413 }
9414
9415 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9416 {
9417         int i;
9418
9419         if (!tg3_flag(tp, SUPPORT_MSIX))
9420                 return;
9421
9422         if (tp->rxq_cnt == 1) {
9423                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9424                 return;
9425         }
9426
9427         /* Validate table against current IRQ count */
9428         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9429                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9430                         break;
9431         }
9432
9433         if (i != TG3_RSS_INDIR_TBL_SIZE)
9434                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9435 }
9436
9437 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9438 {
9439         int i = 0;
9440         u32 reg = MAC_RSS_INDIR_TBL_0;
9441
9442         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9443                 u32 val = tp->rss_ind_tbl[i];
9444                 i++;
9445                 for (; i % 8; i++) {
9446                         val <<= 4;
9447                         val |= tp->rss_ind_tbl[i];
9448                 }
9449                 tw32(reg, val);
9450                 reg += 4;
9451         }
9452 }
9453
9454 /* tp->lock is held. */
9455 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9456 {
9457         u32 val, rdmac_mode;
9458         int i, err, limit;
9459         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9460
9461         tg3_disable_ints(tp);
9462
9463         tg3_stop_fw(tp);
9464
9465         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9466
9467         if (tg3_flag(tp, INIT_COMPLETE))
9468                 tg3_abort_hw(tp, 1);
9469
9470         /* Enable MAC control of LPI */
9471         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9472                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9473                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9474                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9475                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9476
9477                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9478
9479                 tw32_f(TG3_CPMU_EEE_CTRL,
9480                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9481
9482                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9483                       TG3_CPMU_EEEMD_LPI_IN_TX |
9484                       TG3_CPMU_EEEMD_LPI_IN_RX |
9485                       TG3_CPMU_EEEMD_EEE_ENABLE;
9486
9487                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9488                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9489
9490                 if (tg3_flag(tp, ENABLE_APE))
9491                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9492
9493                 tw32_f(TG3_CPMU_EEE_MODE, val);
9494
9495                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9496                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9497                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9498
9499                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9500                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9501                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9502         }
9503
9504         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9505             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9506                 tg3_phy_pull_config(tp);
9507                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9508         }
9509
9510         if (reset_phy)
9511                 tg3_phy_reset(tp);
9512
9513         err = tg3_chip_reset(tp);
9514         if (err)
9515                 return err;
9516
9517         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9518
9519         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9520                 val = tr32(TG3_CPMU_CTRL);
9521                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9522                 tw32(TG3_CPMU_CTRL, val);
9523
9524                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9525                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9526                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9527                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9528
9529                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9530                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9531                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9532                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9533
9534                 val = tr32(TG3_CPMU_HST_ACC);
9535                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9536                 val |= CPMU_HST_ACC_MACCLK_6_25;
9537                 tw32(TG3_CPMU_HST_ACC, val);
9538         }
9539
9540         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9541                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9542                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9543                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9544                 tw32(PCIE_PWR_MGMT_THRESH, val);
9545
9546                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9547                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9548
9549                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9550
9551                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9552                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9553         }
9554
9555         if (tg3_flag(tp, L1PLLPD_EN)) {
9556                 u32 grc_mode = tr32(GRC_MODE);
9557
9558                 /* Access the lower 1K of PL PCIE block registers. */
9559                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9560                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9561
9562                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9563                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9564                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9565
9566                 tw32(GRC_MODE, grc_mode);
9567         }
9568
9569         if (tg3_flag(tp, 57765_CLASS)) {
9570                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9571                         u32 grc_mode = tr32(GRC_MODE);
9572
9573                         /* Access the lower 1K of PL PCIE block registers. */
9574                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9575                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9576
9577                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9578                                    TG3_PCIE_PL_LO_PHYCTL5);
9579                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9580                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9581
9582                         tw32(GRC_MODE, grc_mode);
9583                 }
9584
9585                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9586                         u32 grc_mode;
9587
9588                         /* Fix transmit hangs */
9589                         val = tr32(TG3_CPMU_PADRNG_CTL);
9590                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9591                         tw32(TG3_CPMU_PADRNG_CTL, val);
9592
9593                         grc_mode = tr32(GRC_MODE);
9594
9595                         /* Access the lower 1K of DL PCIE block registers. */
9596                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9597                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9598
9599                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9600                                    TG3_PCIE_DL_LO_FTSMAX);
9601                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9602                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9603                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9604
9605                         tw32(GRC_MODE, grc_mode);
9606                 }
9607
9608                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9609                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9610                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9611                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9612         }
9613
9614         /* This works around an issue with Athlon chipsets on
9615          * B3 tigon3 silicon.  This bit has no effect on any
9616          * other revision.  But do not set this on PCI Express
9617          * chips and don't even touch the clocks if the CPMU is present.
9618          */
9619         if (!tg3_flag(tp, CPMU_PRESENT)) {
9620                 if (!tg3_flag(tp, PCI_EXPRESS))
9621                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9622                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9623         }
9624
9625         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9626             tg3_flag(tp, PCIX_MODE)) {
9627                 val = tr32(TG3PCI_PCISTATE);
9628                 val |= PCISTATE_RETRY_SAME_DMA;
9629                 tw32(TG3PCI_PCISTATE, val);
9630         }
9631
9632         if (tg3_flag(tp, ENABLE_APE)) {
9633                 /* Allow reads and writes to the
9634                  * APE register and memory space.
9635                  */
9636                 val = tr32(TG3PCI_PCISTATE);
9637                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9638                        PCISTATE_ALLOW_APE_SHMEM_WR |
9639                        PCISTATE_ALLOW_APE_PSPACE_WR;
9640                 tw32(TG3PCI_PCISTATE, val);
9641         }
9642
9643         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9644                 /* Enable some hw fixes.  */
9645                 val = tr32(TG3PCI_MSI_DATA);
9646                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9647                 tw32(TG3PCI_MSI_DATA, val);
9648         }
9649
9650         /* Descriptor ring init may make accesses to the
9651          * NIC SRAM area to setup the TX descriptors, so we
9652          * can only do this after the hardware has been
9653          * successfully reset.
9654          */
9655         err = tg3_init_rings(tp);
9656         if (err)
9657                 return err;
9658
9659         if (tg3_flag(tp, 57765_PLUS)) {
9660                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9661                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9662                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9663                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9664                 if (!tg3_flag(tp, 57765_CLASS) &&
9665                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9666                     tg3_asic_rev(tp) != ASIC_REV_5762)
9667                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9668                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9669         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9670                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9671                 /* This value is determined during the probe time DMA
9672                  * engine test, tg3_test_dma.
9673                  */
9674                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9675         }
9676
9677         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9678                           GRC_MODE_4X_NIC_SEND_RINGS |
9679                           GRC_MODE_NO_TX_PHDR_CSUM |
9680                           GRC_MODE_NO_RX_PHDR_CSUM);
9681         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9682
9683         /* Pseudo-header checksum is done by hardware logic and not
9684          * the offload processors, so make the chip do the pseudo-
9685          * header checksums on receive.  For transmit it is more
9686          * convenient to do the pseudo-header checksum in software
9687          * as Linux does that on transmit for us in all cases.
9688          */
9689         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9690
9691         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9692         if (tp->rxptpctl)
9693                 tw32(TG3_RX_PTP_CTL,
9694                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9695
9696         if (tg3_flag(tp, PTP_CAPABLE))
9697                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9698
9699         tw32(GRC_MODE, tp->grc_mode | val);
9700
9701         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
9702         val = tr32(GRC_MISC_CFG);
9703         val &= ~0xff;
9704         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9705         tw32(GRC_MISC_CFG, val);
9706
9707         /* Initialize MBUF/DESC pool. */
9708         if (tg3_flag(tp, 5750_PLUS)) {
9709                 /* Do nothing.  */
9710         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9711                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9712                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9713                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9714                 else
9715                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9716                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9717                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9718         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9719                 int fw_len;
9720
9721                 fw_len = tp->fw_len;
9722                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9723                 tw32(BUFMGR_MB_POOL_ADDR,
9724                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9725                 tw32(BUFMGR_MB_POOL_SIZE,
9726                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9727         }
9728
9729         if (tp->dev->mtu <= ETH_DATA_LEN) {
9730                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9731                      tp->bufmgr_config.mbuf_read_dma_low_water);
9732                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9733                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9734                 tw32(BUFMGR_MB_HIGH_WATER,
9735                      tp->bufmgr_config.mbuf_high_water);
9736         } else {
9737                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9738                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9739                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9740                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9741                 tw32(BUFMGR_MB_HIGH_WATER,
9742                      tp->bufmgr_config.mbuf_high_water_jumbo);
9743         }
9744         tw32(BUFMGR_DMA_LOW_WATER,
9745              tp->bufmgr_config.dma_low_water);
9746         tw32(BUFMGR_DMA_HIGH_WATER,
9747              tp->bufmgr_config.dma_high_water);
9748
9749         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9750         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9751                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9752         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9753             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9754             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9755                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9756         tw32(BUFMGR_MODE, val);
9757         for (i = 0; i < 2000; i++) {
9758                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9759                         break;
9760                 udelay(10);
9761         }
9762         if (i >= 2000) {
9763                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9764                 return -ENODEV;
9765         }
9766
9767         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9768                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9769
9770         tg3_setup_rxbd_thresholds(tp);
9771
9772         /* Initialize TG3_BDINFO's at:
9773          *  RCVDBDI_STD_BD:     standard eth size rx ring
9774          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9775          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9776          *
9777          * like so:
9778          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9779          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9780          *                              ring attribute flags
9781          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9782          *
9783          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9784          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9785          *
9786          * The size of each ring is fixed in the firmware, but the location is
9787          * configurable.
9788          */
9789         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9790              ((u64) tpr->rx_std_mapping >> 32));
9791         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9792              ((u64) tpr->rx_std_mapping & 0xffffffff));
9793         if (!tg3_flag(tp, 5717_PLUS))
9794                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9795                      NIC_SRAM_RX_BUFFER_DESC);
9796
9797         /* Disable the mini ring */
9798         if (!tg3_flag(tp, 5705_PLUS))
9799                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9800                      BDINFO_FLAGS_DISABLED);
9801
9802         /* Program the jumbo buffer descriptor ring control
9803          * blocks on those devices that have them.
9804          */
9805         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9806             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9807
9808                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9809                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9810                              ((u64) tpr->rx_jmb_mapping >> 32));
9811                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9812                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9813                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9814                               BDINFO_FLAGS_MAXLEN_SHIFT;
9815                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9816                              val | BDINFO_FLAGS_USE_EXT_RECV);
9817                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9818                             tg3_flag(tp, 57765_CLASS) ||
9819                             tg3_asic_rev(tp) == ASIC_REV_5762)
9820                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9821                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9822                 } else {
9823                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9824                              BDINFO_FLAGS_DISABLED);
9825                 }
9826
9827                 if (tg3_flag(tp, 57765_PLUS)) {
9828                         val = TG3_RX_STD_RING_SIZE(tp);
9829                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9830                         val |= (TG3_RX_STD_DMA_SZ << 2);
9831                 } else
9832                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9833         } else
9834                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9835
9836         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9837
9838         tpr->rx_std_prod_idx = tp->rx_pending;
9839         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9840
9841         tpr->rx_jmb_prod_idx =
9842                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9843         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9844
9845         tg3_rings_reset(tp);
9846
9847         /* Initialize MAC address and backoff seed. */
9848         __tg3_set_mac_addr(tp, false);
9849
9850         /* MTU + ethernet header + FCS + optional VLAN tag */
9851         tw32(MAC_RX_MTU_SIZE,
9852              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9853
9854         /* The slot time is changed by tg3_setup_phy if we
9855          * run at gigabit with half duplex.
9856          */
9857         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9858               (6 << TX_LENGTHS_IPG_SHIFT) |
9859               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9860
9861         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9862             tg3_asic_rev(tp) == ASIC_REV_5762)
9863                 val |= tr32(MAC_TX_LENGTHS) &
9864                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9865                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9866
9867         tw32(MAC_TX_LENGTHS, val);
9868
9869         /* Receive rules. */
9870         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9871         tw32(RCVLPC_CONFIG, 0x0181);
9872
9873         /* Calculate RDMAC_MODE setting early, we need it to determine
9874          * the RCVLPC_STATE_ENABLE mask.
9875          */
9876         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9877                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9878                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9879                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9880                       RDMAC_MODE_LNGREAD_ENAB);
9881
9882         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9883                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9884
9885         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9886             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9887             tg3_asic_rev(tp) == ASIC_REV_57780)
9888                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9889                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9890                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9891
9892         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9893             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9894                 if (tg3_flag(tp, TSO_CAPABLE) &&
9895                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9896                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9897                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9898                            !tg3_flag(tp, IS_5788)) {
9899                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9900                 }
9901         }
9902
9903         if (tg3_flag(tp, PCI_EXPRESS))
9904                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9905
9906         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9907                 tp->dma_limit = 0;
9908                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9909                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9910                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9911                 }
9912         }
9913
9914         if (tg3_flag(tp, HW_TSO_1) ||
9915             tg3_flag(tp, HW_TSO_2) ||
9916             tg3_flag(tp, HW_TSO_3))
9917                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9918
9919         if (tg3_flag(tp, 57765_PLUS) ||
9920             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9921             tg3_asic_rev(tp) == ASIC_REV_57780)
9922                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9923
9924         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9925             tg3_asic_rev(tp) == ASIC_REV_5762)
9926                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9927
9928         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9929             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9930             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9931             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9932             tg3_flag(tp, 57765_PLUS)) {
9933                 u32 tgtreg;
9934
9935                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9936                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9937                 else
9938                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9939
9940                 val = tr32(tgtreg);
9941                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9942                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9943                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9944                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9945                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9946                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9947                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9948                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9949                 }
9950                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9951         }
9952
9953         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9954             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9955             tg3_asic_rev(tp) == ASIC_REV_5762) {
9956                 u32 tgtreg;
9957
9958                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9959                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9960                 else
9961                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9962
9963                 val = tr32(tgtreg);
9964                 tw32(tgtreg, val |
9965                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9966                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9967         }
9968
9969         /* Receive/send statistics. */
9970         if (tg3_flag(tp, 5750_PLUS)) {
9971                 val = tr32(RCVLPC_STATS_ENABLE);
9972                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9973                 tw32(RCVLPC_STATS_ENABLE, val);
9974         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9975                    tg3_flag(tp, TSO_CAPABLE)) {
9976                 val = tr32(RCVLPC_STATS_ENABLE);
9977                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9978                 tw32(RCVLPC_STATS_ENABLE, val);
9979         } else {
9980                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9981         }
9982         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9983         tw32(SNDDATAI_STATSENAB, 0xffffff);
9984         tw32(SNDDATAI_STATSCTRL,
9985              (SNDDATAI_SCTRL_ENABLE |
9986               SNDDATAI_SCTRL_FASTUPD));
9987
9988         /* Setup host coalescing engine. */
9989         tw32(HOSTCC_MODE, 0);
9990         for (i = 0; i < 2000; i++) {
9991                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9992                         break;
9993                 udelay(10);
9994         }
9995
9996         __tg3_set_coalesce(tp, &tp->coal);
9997
9998         if (!tg3_flag(tp, 5705_PLUS)) {
9999                 /* Status/statistics block address.  See tg3_timer,
10000                  * the tg3_periodic_fetch_stats call there, and
10001                  * tg3_get_stats to see how this works for 5705/5750 chips.
10002                  */
10003                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10004                      ((u64) tp->stats_mapping >> 32));
10005                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10006                      ((u64) tp->stats_mapping & 0xffffffff));
10007                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10008
10009                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10010
10011                 /* Clear statistics and status block memory areas */
10012                 for (i = NIC_SRAM_STATS_BLK;
10013                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10014                      i += sizeof(u32)) {
10015                         tg3_write_mem(tp, i, 0);
10016                         udelay(40);
10017                 }
10018         }
10019
10020         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10021
10022         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10023         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10024         if (!tg3_flag(tp, 5705_PLUS))
10025                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10026
10027         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10028                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10029                 /* reset to prevent losing 1st rx packet intermittently */
10030                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10031                 udelay(10);
10032         }
10033
10034         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10035                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10036                         MAC_MODE_FHDE_ENABLE;
10037         if (tg3_flag(tp, ENABLE_APE))
10038                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10039         if (!tg3_flag(tp, 5705_PLUS) &&
10040             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10041             tg3_asic_rev(tp) != ASIC_REV_5700)
10042                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10043         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10044         udelay(40);
10045
10046         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10047          * If TG3_FLAG_IS_NIC is zero, we should read the
10048          * register to preserve the GPIO settings for LOMs. The GPIOs,
10049          * whether used as inputs or outputs, are set by boot code after
10050          * reset.
10051          */
10052         if (!tg3_flag(tp, IS_NIC)) {
10053                 u32 gpio_mask;
10054
10055                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10056                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10057                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10058
10059                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10060                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10061                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10062
10063                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10064                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10065
10066                 tp->grc_local_ctrl &= ~gpio_mask;
10067                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10068
10069                 /* GPIO1 must be driven high for eeprom write protect */
10070                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10071                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10072                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10073         }
10074         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10075         udelay(100);
10076
10077         if (tg3_flag(tp, USING_MSIX)) {
10078                 val = tr32(MSGINT_MODE);
10079                 val |= MSGINT_MODE_ENABLE;
10080                 if (tp->irq_cnt > 1)
10081                         val |= MSGINT_MODE_MULTIVEC_EN;
10082                 if (!tg3_flag(tp, 1SHOT_MSI))
10083                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10084                 tw32(MSGINT_MODE, val);
10085         }
10086
10087         if (!tg3_flag(tp, 5705_PLUS)) {
10088                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10089                 udelay(40);
10090         }
10091
10092         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10093                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10094                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10095                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10096                WDMAC_MODE_LNGREAD_ENAB);
10097
10098         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10099             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10100                 if (tg3_flag(tp, TSO_CAPABLE) &&
10101                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10102                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10103                         /* nothing */
10104                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10105                            !tg3_flag(tp, IS_5788)) {
10106                         val |= WDMAC_MODE_RX_ACCEL;
10107                 }
10108         }
10109
10110         /* Enable host coalescing bug fix */
10111         if (tg3_flag(tp, 5755_PLUS))
10112                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10113
10114         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10115                 val |= WDMAC_MODE_BURST_ALL_DATA;
10116
10117         tw32_f(WDMAC_MODE, val);
10118         udelay(40);
10119
10120         if (tg3_flag(tp, PCIX_MODE)) {
10121                 u16 pcix_cmd;
10122
10123                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10124                                      &pcix_cmd);
10125                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10126                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10127                         pcix_cmd |= PCI_X_CMD_READ_2K;
10128                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10129                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10130                         pcix_cmd |= PCI_X_CMD_READ_2K;
10131                 }
10132                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10133                                       pcix_cmd);
10134         }
10135
10136         tw32_f(RDMAC_MODE, rdmac_mode);
10137         udelay(40);
10138
10139         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10140                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10141                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10142                                 break;
10143                 }
10144                 if (i < TG3_NUM_RDMA_CHANNELS) {
10145                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10146                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10147                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10148                         tg3_flag_set(tp, 5719_RDMA_BUG);
10149                 }
10150         }
10151
10152         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10153         if (!tg3_flag(tp, 5705_PLUS))
10154                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10155
10156         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10157                 tw32(SNDDATAC_MODE,
10158                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10159         else
10160                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10161
10162         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10163         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10164         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10165         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10166                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10167         tw32(RCVDBDI_MODE, val);
10168         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10169         if (tg3_flag(tp, HW_TSO_1) ||
10170             tg3_flag(tp, HW_TSO_2) ||
10171             tg3_flag(tp, HW_TSO_3))
10172                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10173         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10174         if (tg3_flag(tp, ENABLE_TSS))
10175                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10176         tw32(SNDBDI_MODE, val);
10177         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10178
10179         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10180                 err = tg3_load_5701_a0_firmware_fix(tp);
10181                 if (err)
10182                         return err;
10183         }
10184
10185         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10186                 /* Ignore any errors for the firmware download. If download
10187                  * fails, the device will operate with EEE disabled
10188                  */
10189                 tg3_load_57766_firmware(tp);
10190         }
10191
10192         if (tg3_flag(tp, TSO_CAPABLE)) {
10193                 err = tg3_load_tso_firmware(tp);
10194                 if (err)
10195                         return err;
10196         }
10197
10198         tp->tx_mode = TX_MODE_ENABLE;
10199
10200         if (tg3_flag(tp, 5755_PLUS) ||
10201             tg3_asic_rev(tp) == ASIC_REV_5906)
10202                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10203
10204         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10205             tg3_asic_rev(tp) == ASIC_REV_5762) {
10206                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10207                 tp->tx_mode &= ~val;
10208                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10209         }
10210
10211         tw32_f(MAC_TX_MODE, tp->tx_mode);
10212         udelay(100);
10213
10214         if (tg3_flag(tp, ENABLE_RSS)) {
10215                 tg3_rss_write_indir_tbl(tp);
10216
10217                 /* Setup the "secret" hash key. */
10218                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10219                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10220                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10221                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10222                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10223                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10224                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10225                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10226                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10227                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10228         }
10229
10230         tp->rx_mode = RX_MODE_ENABLE;
10231         if (tg3_flag(tp, 5755_PLUS))
10232                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10233
10234         if (tg3_flag(tp, ENABLE_RSS))
10235                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10236                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10237                                RX_MODE_RSS_IPV6_HASH_EN |
10238                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10239                                RX_MODE_RSS_IPV4_HASH_EN |
10240                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10241
10242         tw32_f(MAC_RX_MODE, tp->rx_mode);
10243         udelay(10);
10244
10245         tw32(MAC_LED_CTRL, tp->led_ctrl);
10246
10247         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10248         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10249                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10250                 udelay(10);
10251         }
10252         tw32_f(MAC_RX_MODE, tp->rx_mode);
10253         udelay(10);
10254
10255         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10256                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10257                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10258                         /* Set drive transmission level to 1.2V  */
10259                         /* only if the signal pre-emphasis bit is not set  */
10260                         val = tr32(MAC_SERDES_CFG);
10261                         val &= 0xfffff000;
10262                         val |= 0x880;
10263                         tw32(MAC_SERDES_CFG, val);
10264                 }
10265                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10266                         tw32(MAC_SERDES_CFG, 0x616000);
10267         }
10268
10269         /* Prevent chip from dropping frames when flow control
10270          * is enabled.
10271          */
10272         if (tg3_flag(tp, 57765_CLASS))
10273                 val = 1;
10274         else
10275                 val = 2;
10276         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10277
10278         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10279             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10280                 /* Use hardware link auto-negotiation */
10281                 tg3_flag_set(tp, HW_AUTONEG);
10282         }
10283
10284         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10285             tg3_asic_rev(tp) == ASIC_REV_5714) {
10286                 u32 tmp;
10287
10288                 tmp = tr32(SERDES_RX_CTRL);
10289                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10290                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10291                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10292                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10293         }
10294
10295         if (!tg3_flag(tp, USE_PHYLIB)) {
10296                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10297                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10298
10299                 err = tg3_setup_phy(tp, false);
10300                 if (err)
10301                         return err;
10302
10303                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10304                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10305                         u32 tmp;
10306
10307                         /* Clear CRC stats. */
10308                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10309                                 tg3_writephy(tp, MII_TG3_TEST1,
10310                                              tmp | MII_TG3_TEST1_CRC_EN);
10311                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10312                         }
10313                 }
10314         }
10315
10316         __tg3_set_rx_mode(tp->dev);
10317
10318         /* Initialize receive rules. */
10319         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10320         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10321         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10322         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10323
10324         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10325                 limit = 8;
10326         else
10327                 limit = 16;
10328         if (tg3_flag(tp, ENABLE_ASF))
10329                 limit -= 4;
10330         switch (limit) {
10331         case 16:
10332                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10333         case 15:
10334                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10335         case 14:
10336                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10337         case 13:
10338                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10339         case 12:
10340                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10341         case 11:
10342                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10343         case 10:
10344                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10345         case 9:
10346                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10347         case 8:
10348                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10349         case 7:
10350                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10351         case 6:
10352                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10353         case 5:
10354                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10355         case 4:
10356                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10357         case 3:
10358                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10359         case 2:
10360         case 1:
10361
10362         default:
10363                 break;
10364         }
10365
10366         if (tg3_flag(tp, ENABLE_APE))
10367                 /* Write our heartbeat update interval to APE. */
10368                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10369                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10370
10371         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10372
10373         return 0;
10374 }
10375
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Make sure the core clock source is set up before touching
	 * any other chip state.
	 */
	tg3_switch_clocks(tp);

	/* Rebase the indirect memory window to offset 0 so subsequent
	 * windowed accesses start from a known address.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10387
10388 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10389 {
10390         int i;
10391
10392         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10393                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10394
10395                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10396                 off += len;
10397
10398                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10399                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10400                         memset(ocir, 0, TG3_OCIR_LEN);
10401         }
10402 }
10403
10404 /* sysfs attributes for hwmon */
10405 static ssize_t tg3_show_temp(struct device *dev,
10406                              struct device_attribute *devattr, char *buf)
10407 {
10408         struct pci_dev *pdev = to_pci_dev(dev);
10409         struct net_device *netdev = pci_get_drvdata(pdev);
10410         struct tg3 *tp = netdev_priv(netdev);
10411         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10412         u32 temperature;
10413
10414         spin_lock_bh(&tp->lock);
10415         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10416                                 sizeof(temperature));
10417         spin_unlock_bh(&tp->lock);
10418         return sprintf(buf, "%u\n", temperature);
10419 }
10420
10421
/* Read-only hwmon sensors backed by the APE scratchpad: the current
 * temperature plus the firmware's caution and maximum thresholds.
 * The last SENSOR_DEVICE_ATTR argument is the scratchpad offset that
 * tg3_show_temp() reads (via attr->index).
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list exposed as one sysfs group by
 * tg3_hwmon_open().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10439
10440 static void tg3_hwmon_close(struct tg3 *tp)
10441 {
10442         if (tp->hwmon_dev) {
10443                 hwmon_device_unregister(tp->hwmon_dev);
10444                 tp->hwmon_dev = NULL;
10445                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10446         }
10447 }
10448
/* Register temperature sensors with hwmon if the APE firmware is
 * publishing sensor-data records.  Best effort: failures are logged
 * and the device simply runs without hwmon support.
 */
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	/* Sum the sizes of all active records; a total of zero means
	 * the firmware exposes no sensor data at all.
	 */
	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	/* On failure, undo the sysfs group so close() has nothing to do. */
	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
10483
10484
/* Accumulate the 32-bit hardware counter REG into the 64-bit
 * high/low statistic pair PSTAT.  If the low word wraps after the
 * addition (detected by low < addend), carry into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10491
/* Fold the chip's 32-bit MAC statistics counters into the 64-bit
 * software accumulators in tp->hw_stats.  Called once per second from
 * tg3_timer() (with tp->lock held) on 5705_PLUS parts, which lack
 * hardware statistics DMA.  Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once enough packets have been sent, the TX
	 * length-related RDMA fix is no longer needed, so turn it off
	 * and clear the flag (one-shot).
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717 / 5719-A0 / 5720-A0 the discard counter is unreliable;
	 * approximate rx_discards from the mbuf low-watermark attention
	 * bit instead (counting at most one discard per timer tick).
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Write-one-to-clear the attention bit. */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10556
/* Workaround for chips that can occasionally drop an MSI: if a NAPI
 * ring has pending work but its consumer indices have not moved since
 * the last timer tick, assume the interrupt was lost.  One tick of
 * grace is allowed (chk_msi_cnt) before tg3_msi() is invoked by hand
 * to kick the NAPI poll.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: wait one more
					 * before declaring the MSI lost.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or no work): reset the stall state
		 * and record the current consumer positions.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10579
/* Driver heartbeat timer, normally run 10 (or 1) times per second.
 * Responsibilities: detect missed MSIs, poke non-tagged-status chips,
 * detect a hung write DMA engine, run the once-per-second work
 * (statistics, EEE, link polling), and send the ASF keep-alive.
 * Always re-arms itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body entirely while interrupts are being synced or a
	 * reset task is pending; just re-arm.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly unserviced:
			 * force an interrupt.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise force a status block refresh now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine stopped unexpectedly: schedule a full
		 * chip reset (must drop the lock first).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable, armed elsewhere via setlpicnt. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC status for link/PHY events instead of
			 * relying on interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but state changed, or link is down
			 * and the PCS shows sync/signal: renegotiate.
			 */
			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to reset
					 * the serdes state machine.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10711
10712 static void tg3_timer_init(struct tg3 *tp)
10713 {
10714         if (tg3_flag(tp, TAGGED_STATUS) &&
10715             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10716             !tg3_flag(tp, 57765_CLASS))
10717                 tp->timer_offset = HZ;
10718         else
10719                 tp->timer_offset = HZ / 10;
10720
10721         BUG_ON(tp->timer_offset > HZ);
10722
10723         tp->timer_multiplier = (HZ / tp->timer_offset);
10724         tp->asf_multiplier = (HZ / tp->timer_offset) *
10725                              TG3_FW_UPDATE_FREQ_SEC;
10726
10727         init_timer(&tp->timer);
10728         tp->timer.data = (unsigned long) tp;
10729         tp->timer.function = tg3_timer;
10730 }
10731
10732 static void tg3_timer_start(struct tg3 *tp)
10733 {
10734         tp->asf_counter   = tp->asf_multiplier;
10735         tp->timer_counter = tp->timer_multiplier;
10736
10737         tp->timer.expires = jiffies + tp->timer_offset;
10738         add_timer(&tp->timer);
10739 }
10740
/* Stop the heartbeat timer, waiting for a concurrently running
 * tg3_timer() callback to finish on other CPUs.  Must not be called
 * from the timer callback itself (deadlock).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10745
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; note that the lock is
 * temporarily dropped around dev_close() (hence the sparse
 * __releases/__acquires annotations) and reacquired before return.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() sleeps and takes other locks, so the
		 * device lock must be dropped around it.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10769
/* Deferred full-chip reset, scheduled via tg3_reset_task_schedule()
 * (e.g. on TX timeout or a detected hardware hang).  Runs from the
 * workqueue, so it may sleep.  Clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled; nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_phy_stop()/tg3_netif_stop() may sleep; drop the lock. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX hang may have been caused by reordered mailbox
		 * writes; switch to flushed (non-posted) mailbox writes
		 * before restarting the chip.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the reinit succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10813
10814 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10815 {
10816         irq_handler_t fn;
10817         unsigned long flags;
10818         char *name;
10819         struct tg3_napi *tnapi = &tp->napi[irq_num];
10820
10821         if (tp->irq_cnt == 1)
10822                 name = tp->dev->name;
10823         else {
10824                 name = &tnapi->irq_lbl[0];
10825                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10826                 name[IFNAMSIZ-1] = 0;
10827         }
10828
10829         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10830                 fn = tg3_msi;
10831                 if (tg3_flag(tp, 1SHOT_MSI))
10832                         fn = tg3_msi_1shot;
10833                 flags = 0;
10834         } else {
10835                 fn = tg3_interrupt;
10836                 if (tg3_flag(tp, TAGGED_STATUS))
10837                         fn = tg3_interrupt_tagged;
10838                 flags = IRQF_SHARED;
10839         }
10840
10841         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10842 }
10843
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily swaps in a minimal test ISR, forces a coalescing event,
 * and polls for evidence of delivery (interrupt mailbox written or
 * PCI interrupts masked by the ISR).  Restores the normal handler
 * before returning.  Returns 0 if an interrupt arrived, -EIO if not,
 * or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force a host coalescing event, which should raise an interrupt. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for evidence that the interrupt fired. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack the status block so a new interrupt can be raised. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the regular production interrupt handler. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10917
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	/* Fall back to the legacy (shared) INTx vector. */
	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	/* Reinit failed: release the IRQ we just requested. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10978
10979 static int tg3_request_firmware(struct tg3 *tp)
10980 {
10981         const struct tg3_firmware_hdr *fw_hdr;
10982
10983         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10984                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10985                            tp->fw_needed);
10986                 return -ENOENT;
10987         }
10988
10989         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10990
10991         /* Firmware blob starts with version numbers, followed by
10992          * start address and _full_ length including BSS sections
10993          * (which must be longer than the actual data, of course
10994          */
10995
10996         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10997         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10998                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10999                            tp->fw_len, tp->fw_needed);
11000                 release_firmware(tp->fw);
11001                 tp->fw = NULL;
11002                 return -EINVAL;
11003         }
11004
11005         /* We no longer need firmware; we have it. */
11006         tp->fw_needed = NULL;
11007         return 0;
11008 }
11009
11010 static u32 tg3_irq_count(struct tg3 *tp)
11011 {
11012         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11013
11014         if (irq_cnt > 1) {
11015                 /* We want as many rx rings enabled as there are cpus.
11016                  * In multiqueue MSI-X mode, the first MSI-X vector
11017                  * only deals with link interrupts, etc, so we add
11018                  * one to the number of vectors we are requesting.
11019                  */
11020                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11021         }
11022
11023         return irq_cnt;
11024 }
11025
/* Try to bring the device up in MSI-X mode.  Chooses the RX/TX queue
 * counts, negotiates the vector count with the PCI core (retrying
 * once with however many vectors were actually available), and sets
 * the RSS/TSS flags when multiple queues are enabled.  Returns true
 * when MSI-X is in use, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	/* pci_enable_msix(): 0 = success, <0 = hard failure, >0 = the
	 * number of vectors that could be allocated instead.
	 */
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Retry with the reduced vector count and shrink the
		 * queue counts to match (one vector is reserved for
		 * link interrupts).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11086
/* Select and enable the interrupt mode for the device, preferring
 * MSI-X, then MSI, then legacy INTx.  Also programs MSGINT_MODE for
 * multivector / one-shot operation and normalizes the queue counts
 * for the single-vector case.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* Chips without one-shot MSI need it explicitly disabled. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx are always single-vector. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11125
11126 static void tg3_ints_fini(struct tg3 *tp)
11127 {
11128         if (tg3_flag(tp, USING_MSIX))
11129                 pci_disable_msix(tp->pdev);
11130         else if (tg3_flag(tp, USING_MSI))
11131                 pci_disable_msi(tp->pdev);
11132         tg3_flag_clear(tp, USING_MSI);
11133         tg3_flag_clear(tp, USING_MSIX);
11134         tg3_flag_clear(tp, ENABLE_RSS);
11135         tg3_flag_clear(tp, ENABLE_TSS);
11136 }
11137
/* Bring the device to a fully running state: configure interrupts,
 * allocate DMA-consistent ring memory, request per-vector IRQs, program
 * the hardware, then start NAPI, the periodic timer, PTP, and the TX
 * queues.
 *
 * @reset_phy: passed through to tg3_init_hw(); whether the PHY should
 *             be reset as part of hardware init.
 * @test_irq:  when MSI is in use, verify the MSI interrupt actually
 *             fires (tg3_test_msi()) before committing to it.
 * @init:      true on first open (tg3_ptp_init) vs. resume
 *             (tg3_ptp_resume).
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far is released through the err_out* unwind labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind only the IRQs requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		/* Hardware init failed: halt the chip and drop the rings
		 * while still holding the full lock.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		/* Confirm MSI delivery actually works before relying on it. */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			/* Enable one-shot MSI in the PCIE transaction
			 * config for pre-57765 chips.
			 */
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
11253
/* Inverse of tg3_start(): cancel any pending reset task, stop the data
 * path, timer, hwmon and PHY, halt the chip and free the rings under
 * the full lock, then release the IRQs, interrupt vectors, NAPI
 * contexts and DMA-consistent memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free the per-vector IRQs in reverse order of request. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11288
11289 static int tg3_open(struct net_device *dev)
11290 {
11291         struct tg3 *tp = netdev_priv(dev);
11292         int err;
11293
11294         if (tp->fw_needed) {
11295                 err = tg3_request_firmware(tp);
11296                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11297                         if (err) {
11298                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11299                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11300                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11301                                 netdev_warn(tp->dev, "EEE capability restored\n");
11302                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11303                         }
11304                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11305                         if (err)
11306                                 return err;
11307                 } else if (err) {
11308                         netdev_warn(tp->dev, "TSO capability disabled\n");
11309                         tg3_flag_clear(tp, TSO_CAPABLE);
11310                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11311                         netdev_notice(tp->dev, "TSO capability restored\n");
11312                         tg3_flag_set(tp, TSO_CAPABLE);
11313                 }
11314         }
11315
11316         tg3_carrier_off(tp);
11317
11318         err = tg3_power_up(tp);
11319         if (err)
11320                 return err;
11321
11322         tg3_full_lock(tp, 0);
11323
11324         tg3_disable_ints(tp);
11325         tg3_flag_clear(tp, INIT_COMPLETE);
11326
11327         tg3_full_unlock(tp);
11328
11329         err = tg3_start(tp,
11330                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11331                         true, true);
11332         if (err) {
11333                 tg3_frob_aux_power(tp, false);
11334                 pci_set_power_state(tp->pdev, PCI_D3hot);
11335         }
11336
11337         if (tg3_flag(tp, PTP_CAPABLE)) {
11338                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11339                                                    &tp->pdev->dev);
11340                 if (IS_ERR(tp->ptp_clock))
11341                         tp->ptp_clock = NULL;
11342         }
11343
11344         return err;
11345 }
11346
11347 static int tg3_close(struct net_device *dev)
11348 {
11349         struct tg3 *tp = netdev_priv(dev);
11350
11351         tg3_ptp_fini(tp);
11352
11353         tg3_stop(tp);
11354
11355         /* Clear stats across close / open calls */
11356         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11357         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11358
11359         tg3_power_down(tp);
11360
11361         tg3_carrier_off(tp);
11362
11363         return 0;
11364 }
11365
11366 static inline u64 get_stat64(tg3_stat64_t *val)
11367 {
11368        return ((u64)val->high << 32) | ((u64)val->low);
11369 }
11370
/* Return the cumulative RX CRC error count.  5700/5701 chips with a
 * copper PHY do not use the MAC's FCS counter; instead the PHY's CRC
 * counter is enabled via the TEST1 register, read back, and accumulated
 * in software (tp->phy_crc_errors).  All other configurations report
 * the MAC rx_fcs_errors hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;	/* PHY read failed: count nothing */

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11394
/* Fold one hardware counter into the ethtool stats structure:
 * reported value = previously saved snapshot + current hw counter.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill *estats with the complete set of ethtool statistics.  Requires
 * tp->hw_stats to be valid (device has been brought up).
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* RX MAC counters */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* TX MAC counters, including the per-collision-count histogram */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive-list placement statistics */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send-data-initiator statistics */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host-coalescing statistics */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11482
/* Fill the standard rtnl_link_stats64 structure from the hardware
 * statistics block, adding in the previously saved snapshot
 * (tp->net_stats_prev) so values remain cumulative.  Requires
 * tp->hw_stats to be valid.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Packet totals sum the unicast/multicast/broadcast counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* TX errors aggregate several distinct hardware error counters. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	/* Length errors cover both oversized and undersized frames. */
	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11538
11539 static int tg3_get_regs_len(struct net_device *dev)
11540 {
11541         return TG3_REG_BLK_SIZE;
11542 }
11543
11544 static void tg3_get_regs(struct net_device *dev,
11545                 struct ethtool_regs *regs, void *_p)
11546 {
11547         struct tg3 *tp = netdev_priv(dev);
11548
11549         regs->version = 0;
11550
11551         memset(_p, 0, TG3_REG_BLK_SIZE);
11552
11553         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11554                 return;
11555
11556         tg3_full_lock(tp, 0);
11557
11558         tg3_dump_legacy_regs(tp, (u32 *)_p);
11559
11560         tg3_full_unlock(tp);
11561 }
11562
11563 static int tg3_get_eeprom_len(struct net_device *dev)
11564 {
11565         struct tg3 *tp = netdev_priv(dev);
11566
11567         return tp->nvram_size;
11568 }
11569
/* ethtool get_eeprom: copy NVRAM contents into *data.  NVRAM is read in
 * big-endian 32-bit words, so the transfer is split into three phases:
 * an unaligned head (partial word), the aligned middle, and an
 * unaligned tail.  eeprom->len is updated as bytes are produced, so a
 * partial result is reported correctly on a mid-transfer failure.
 *
 * Returns 0 on success or a negative errno; -EINVAL with no NVRAM,
 * -EAGAIN while the chip is in the low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* rebuilt below as bytes are copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was transferred before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11632
/* ethtool set_eeprom: write *data into NVRAM.  NVRAM writes are done in
 * aligned 32-bit units, so when the requested range starts or ends off
 * a 4-byte boundary the surrounding words are read first and merged
 * with the user data in a temporary bounce buffer (read-modify-write).
 *
 * Returns 0 on success or a negative errno; -EAGAIN while the chip is
 * in the low-power state, -EINVAL with no NVRAM or a bad magic.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* Preserve the bytes before the write range by fetching
		 * the word that straddles the start.
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		/* Preserve the bytes after the write range by fetching
		 * the word that straddles the end.
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved head/tail words with the user data in
		 * a temporary aligned buffer.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11691
/* ethtool get_settings: report link capabilities, advertisement, and
 * current link state.  With phylib in use the query is delegated to
 * the attached PHY device; otherwise the answer is assembled from the
 * driver's own link_config and phy_flags.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the PHY is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper offers 10/100 + TP; serdes reports fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the RX/TX flow-control configuration onto the
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		/* MDI/MDI-X state only applies to copper links. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link: speed/duplex/MDI state are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11757
/* ethtool set_settings: validate and apply a new link configuration.
 * With phylib in use the request is delegated to the attached PHY.
 * Otherwise the requested autoneg/speed/duplex/advertisement is
 * validated against what the hardware supports, committed to
 * tp->link_config under the full lock, and the PHY is re-setup if the
 * interface is running.  Returns 0 or -EAGAIN/-EINVAL.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode requires an explicit, valid duplex. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisement bits this hardware
		 * can honor, then reject anything outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits in the stored
		 * advertisement.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: serdes links only support 1000/full;
		 * copper forced mode only supports 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember that the user explicitly configured the link. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
11848
11849 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11850 {
11851         struct tg3 *tp = netdev_priv(dev);
11852
11853         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11854         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11855         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11856         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11857 }
11858
11859 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11860 {
11861         struct tg3 *tp = netdev_priv(dev);
11862
11863         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11864                 wol->supported = WAKE_MAGIC;
11865         else
11866                 wol->supported = 0;
11867         wol->wolopts = 0;
11868         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11869                 wol->wolopts = WAKE_MAGIC;
11870         memset(&wol->sopass, 0, sizeof(wol->sopass));
11871 }
11872
11873 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11874 {
11875         struct tg3 *tp = netdev_priv(dev);
11876         struct device *dp = &tp->pdev->dev;
11877
11878         if (wol->wolopts & ~WAKE_MAGIC)
11879                 return -EINVAL;
11880         if ((wol->wolopts & WAKE_MAGIC) &&
11881             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11882                 return -EINVAL;
11883
11884         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11885
11886         spin_lock_bh(&tp->lock);
11887         if (device_may_wakeup(dp))
11888                 tg3_flag_set(tp, WOL_ENABLE);
11889         else
11890                 tg3_flag_clear(tp, WOL_ENABLE);
11891         spin_unlock_bh(&tp->lock);
11892
11893         return 0;
11894 }
11895
11896 static u32 tg3_get_msglevel(struct net_device *dev)
11897 {
11898         struct tg3 *tp = netdev_priv(dev);
11899         return tp->msg_enable;
11900 }
11901
11902 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11903 {
11904         struct tg3 *tp = netdev_priv(dev);
11905         tp->msg_enable = value;
11906 }
11907
/* ethtool nway_reset: restart link autonegotiation.  Only valid while
 * the interface is running, and never on a serdes PHY.  With phylib
 * the restart is delegated to the PHY layer; otherwise BMCR is
 * rewritten directly with ANRESTART|ANENABLE.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result
		 * discarded -- presumably a deliberate dummy read, but
		 * confirm whether it is required or redundant.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11943
11944 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11945 {
11946         struct tg3 *tp = netdev_priv(dev);
11947
11948         ering->rx_max_pending = tp->rx_std_ring_mask;
11949         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11950                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11951         else
11952                 ering->rx_jumbo_max_pending = 0;
11953
11954         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11955
11956         ering->rx_pending = tp->rx_pending;
11957         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11958                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11959         else
11960                 ering->rx_jumbo_pending = 0;
11961
11962         ering->tx_pending = tp->napi[0].tx_pending;
11963 }
11964
/* ethtool set_ringparam handler: resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware ring masks, then
 * (if the interface is up) quiesces the device, applies the new sizes
 * and restarts the hardware.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* The TX ring must be able to hold at least one maximally
	 * fragmented skb; TSO_BUG chips need extra headroom.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;	/* synchronize IRQs when taking the full lock */
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Chips with the MAX_RXPEND_64 flag are clamped to 63 RX BDs. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues use the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the HW came back up. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12010
12011 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12012 {
12013         struct tg3 *tp = netdev_priv(dev);
12014
12015         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12016
12017         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12018                 epause->rx_pause = 1;
12019         else
12020                 epause->rx_pause = 0;
12021
12022         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12023                 epause->tx_pause = 1;
12024         else
12025                 epause->tx_pause = 0;
12026 }
12027
/* ethtool set_pauseparam handler: configure RX/TX flow control.
 *
 * On phylib-managed devices, translates the rx/tx pause request into
 * MII pause advertisement bits and renegotiates the link if needed.
 * Otherwise the flow-control flags are applied directly and, if the
 * interface is up, the hardware is halted and restarted.
 * Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause (rx != tx) needs Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx pause combination onto the MII pause
		 * advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			/* Only touch the PHY if the pause bits changed. */
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: stash the new advertisement
			 * for when the link is brought up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Bounce the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	/* Remember that the user overrode the defaults. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12134
12135 static int tg3_get_sset_count(struct net_device *dev, int sset)
12136 {
12137         switch (sset) {
12138         case ETH_SS_TEST:
12139                 return TG3_NUM_TEST;
12140         case ETH_SS_STATS:
12141                 return TG3_NUM_STATS;
12142         default:
12143                 return -EOPNOTSUPP;
12144         }
12145 }
12146
12147 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12148                          u32 *rules __always_unused)
12149 {
12150         struct tg3 *tp = netdev_priv(dev);
12151
12152         if (!tg3_flag(tp, SUPPORT_MSIX))
12153                 return -EOPNOTSUPP;
12154
12155         switch (info->cmd) {
12156         case ETHTOOL_GRXRINGS:
12157                 if (netif_running(tp->dev))
12158                         info->data = tp->rxq_cnt;
12159                 else {
12160                         info->data = num_online_cpus();
12161                         if (info->data > TG3_RSS_MAX_NUM_QS)
12162                                 info->data = TG3_RSS_MAX_NUM_QS;
12163                 }
12164
12165                 /* The first interrupt vector only
12166                  * handles link interrupts.
12167                  */
12168                 info->data -= 1;
12169                 return 0;
12170
12171         default:
12172                 return -EOPNOTSUPP;
12173         }
12174 }
12175
12176 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12177 {
12178         u32 size = 0;
12179         struct tg3 *tp = netdev_priv(dev);
12180
12181         if (tg3_flag(tp, SUPPORT_MSIX))
12182                 size = TG3_RSS_INDIR_TBL_SIZE;
12183
12184         return size;
12185 }
12186
12187 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12188 {
12189         struct tg3 *tp = netdev_priv(dev);
12190         int i;
12191
12192         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12193                 indir[i] = tp->rss_ind_tbl[i];
12194
12195         return 0;
12196 }
12197
12198 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12199 {
12200         struct tg3 *tp = netdev_priv(dev);
12201         size_t i;
12202
12203         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12204                 tp->rss_ind_tbl[i] = indir[i];
12205
12206         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12207                 return 0;
12208
12209         /* It is legal to write the indirection
12210          * table while the device is running.
12211          */
12212         tg3_full_lock(tp, 0);
12213         tg3_rss_write_indir_tbl(tp);
12214         tg3_full_unlock(tp);
12215
12216         return 0;
12217 }
12218
12219 static void tg3_get_channels(struct net_device *dev,
12220                              struct ethtool_channels *channel)
12221 {
12222         struct tg3 *tp = netdev_priv(dev);
12223         u32 deflt_qs = netif_get_num_default_rss_queues();
12224
12225         channel->max_rx = tp->rxq_max;
12226         channel->max_tx = tp->txq_max;
12227
12228         if (netif_running(dev)) {
12229                 channel->rx_count = tp->rxq_cnt;
12230                 channel->tx_count = tp->txq_cnt;
12231         } else {
12232                 if (tp->rxq_req)
12233                         channel->rx_count = tp->rxq_req;
12234                 else
12235                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12236
12237                 if (tp->txq_req)
12238                         channel->tx_count = tp->txq_req;
12239                 else
12240                         channel->tx_count = min(deflt_qs, tp->txq_max);
12241         }
12242 }
12243
12244 static int tg3_set_channels(struct net_device *dev,
12245                             struct ethtool_channels *channel)
12246 {
12247         struct tg3 *tp = netdev_priv(dev);
12248
12249         if (!tg3_flag(tp, SUPPORT_MSIX))
12250                 return -EOPNOTSUPP;
12251
12252         if (channel->rx_count > tp->rxq_max ||
12253             channel->tx_count > tp->txq_max)
12254                 return -EINVAL;
12255
12256         tp->rxq_req = channel->rx_count;
12257         tp->txq_req = channel->tx_count;
12258
12259         if (!netif_running(dev))
12260                 return 0;
12261
12262         tg3_stop(tp);
12263
12264         tg3_carrier_off(tp);
12265
12266         tg3_start(tp, true, false, false);
12267
12268         return 0;
12269 }
12270
12271 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12272 {
12273         switch (stringset) {
12274         case ETH_SS_STATS:
12275                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12276                 break;
12277         case ETH_SS_TEST:
12278                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12279                 break;
12280         default:
12281                 WARN_ON(1);     /* we need a WARN() */
12282                 break;
12283         }
12284 }
12285
12286 static int tg3_set_phys_id(struct net_device *dev,
12287                             enum ethtool_phys_id_state state)
12288 {
12289         struct tg3 *tp = netdev_priv(dev);
12290
12291         if (!netif_running(tp->dev))
12292                 return -EAGAIN;
12293
12294         switch (state) {
12295         case ETHTOOL_ID_ACTIVE:
12296                 return 1;       /* cycle on/off once per second */
12297
12298         case ETHTOOL_ID_ON:
12299                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12300                      LED_CTRL_1000MBPS_ON |
12301                      LED_CTRL_100MBPS_ON |
12302                      LED_CTRL_10MBPS_ON |
12303                      LED_CTRL_TRAFFIC_OVERRIDE |
12304                      LED_CTRL_TRAFFIC_BLINK |
12305                      LED_CTRL_TRAFFIC_LED);
12306                 break;
12307
12308         case ETHTOOL_ID_OFF:
12309                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12310                      LED_CTRL_TRAFFIC_OVERRIDE);
12311                 break;
12312
12313         case ETHTOOL_ID_INACTIVE:
12314                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12315                 break;
12316         }
12317
12318         return 0;
12319 }
12320
12321 static void tg3_get_ethtool_stats(struct net_device *dev,
12322                                    struct ethtool_stats *estats, u64 *tmp_stats)
12323 {
12324         struct tg3 *tp = netdev_priv(dev);
12325
12326         if (tp->hw_stats)
12327                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12328         else
12329                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12330 }
12331
/* Read the VPD block out of NVRAM (or PCI VPD space).
 *
 * On TG3_EEPROM_MAGIC parts the NVRAM directory is searched for an
 * extended-VPD entry; if none is found the legacy fixed VPD offset and
 * length are used.  On other parts the data is fetched through
 * pci_read_vpd().  Returns a kmalloc'd buffer (caller must kfree) with
 * *vpdlen set to its length, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Directory entry found: length in words, data
			 * pointer in the following word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD entry: fall back to the fixed VPD region. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via PCI VPD; retry up to three times on
		 * timeout/interrupt before giving up.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12407
12408 #define NVRAM_TEST_SIZE 0x100
12409 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12410 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12411 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12412 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12413 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12414 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12415 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12416 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12417
/* ethtool self-test: verify NVRAM integrity.
 *
 * The NVRAM image size is chosen from the magic number (regular EEPROM,
 * selfboot format 1 by revision, or selfboot HW format).  The image is
 * read and its checksum/parity verified according to its format; for
 * regular EEPROM parts the bootstrap and manufacturing CRCs and the VPD
 * RO-section checksum are also validated.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into the buffer, a big-endian word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Selfboot format 1: a simple byte checksum over the
		 * image must be zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Selfboot HW format: per-byte parity bits are stored at
		 * fixed offsets (0, 8, 16/17) interleaved with the data.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must have odd parity when combined with
		 * its stored parity bit.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Regular EEPROM image: verify the two CRC-protected blocks. */
	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally validate the VPD read-only section checksum, if any. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			/* Byte-sum from the start of VPD through the RV
			 * field must be zero.
			 */
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12596
12597 #define TG3_SERDES_TIMEOUT_SEC  2
12598 #define TG3_COPPER_TIMEOUT_SEC  6
12599
12600 static int tg3_test_link(struct tg3 *tp)
12601 {
12602         int i, max;
12603
12604         if (!netif_running(tp->dev))
12605                 return -ENODEV;
12606
12607         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12608                 max = TG3_SERDES_TIMEOUT_SEC;
12609         else
12610                 max = TG3_COPPER_TIMEOUT_SEC;
12611
12612         for (i = 0; i < max; i++) {
12613                 if (tp->link_up)
12614                         return 0;
12615
12616                 if (msleep_interruptible(1000))
12617                         break;
12618         }
12619
12620         return -EIO;
12621 }
12622
12623 /* Only test the commonly used registers */
12624 static int tg3_test_registers(struct tg3 *tp)
12625 {
12626         int i, is_5705, is_5750;
12627         u32 offset, read_mask, write_mask, val, save_val, read_val;
12628         static struct {
12629                 u16 offset;
12630                 u16 flags;
12631 #define TG3_FL_5705     0x1
12632 #define TG3_FL_NOT_5705 0x2
12633 #define TG3_FL_NOT_5788 0x4
12634 #define TG3_FL_NOT_5750 0x8
12635                 u32 read_mask;
12636                 u32 write_mask;
12637         } reg_tbl[] = {
12638                 /* MAC Control Registers */
12639                 { MAC_MODE, TG3_FL_NOT_5705,
12640                         0x00000000, 0x00ef6f8c },
12641                 { MAC_MODE, TG3_FL_5705,
12642                         0x00000000, 0x01ef6b8c },
12643                 { MAC_STATUS, TG3_FL_NOT_5705,
12644                         0x03800107, 0x00000000 },
12645                 { MAC_STATUS, TG3_FL_5705,
12646                         0x03800100, 0x00000000 },
12647                 { MAC_ADDR_0_HIGH, 0x0000,
12648                         0x00000000, 0x0000ffff },
12649                 { MAC_ADDR_0_LOW, 0x0000,
12650                         0x00000000, 0xffffffff },
12651                 { MAC_RX_MTU_SIZE, 0x0000,
12652                         0x00000000, 0x0000ffff },
12653                 { MAC_TX_MODE, 0x0000,
12654                         0x00000000, 0x00000070 },
12655                 { MAC_TX_LENGTHS, 0x0000,
12656                         0x00000000, 0x00003fff },
12657                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12658                         0x00000000, 0x000007fc },
12659                 { MAC_RX_MODE, TG3_FL_5705,
12660                         0x00000000, 0x000007dc },
12661                 { MAC_HASH_REG_0, 0x0000,
12662                         0x00000000, 0xffffffff },
12663                 { MAC_HASH_REG_1, 0x0000,
12664                         0x00000000, 0xffffffff },
12665                 { MAC_HASH_REG_2, 0x0000,
12666                         0x00000000, 0xffffffff },
12667                 { MAC_HASH_REG_3, 0x0000,
12668                         0x00000000, 0xffffffff },
12669
12670                 /* Receive Data and Receive BD Initiator Control Registers. */
12671                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12672                         0x00000000, 0xffffffff },
12673                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12674                         0x00000000, 0xffffffff },
12675                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12676                         0x00000000, 0x00000003 },
12677                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12678                         0x00000000, 0xffffffff },
12679                 { RCVDBDI_STD_BD+0, 0x0000,
12680                         0x00000000, 0xffffffff },
12681                 { RCVDBDI_STD_BD+4, 0x0000,
12682                         0x00000000, 0xffffffff },
12683                 { RCVDBDI_STD_BD+8, 0x0000,
12684                         0x00000000, 0xffff0002 },
12685                 { RCVDBDI_STD_BD+0xc, 0x0000,
12686                         0x00000000, 0xffffffff },
12687
12688                 /* Receive BD Initiator Control Registers. */
12689                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12690                         0x00000000, 0xffffffff },
12691                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12692                         0x00000000, 0x000003ff },
12693                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12694                         0x00000000, 0xffffffff },
12695
12696                 /* Host Coalescing Control Registers. */
12697                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12698                         0x00000000, 0x00000004 },
12699                 { HOSTCC_MODE, TG3_FL_5705,
12700                         0x00000000, 0x000000f6 },
12701                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12702                         0x00000000, 0xffffffff },
12703                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12704                         0x00000000, 0x000003ff },
12705                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12706                         0x00000000, 0xffffffff },
12707                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12708                         0x00000000, 0x000003ff },
12709                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12710                         0x00000000, 0xffffffff },
12711                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12712                         0x00000000, 0x000000ff },
12713                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12714                         0x00000000, 0xffffffff },
12715                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12716                         0x00000000, 0x000000ff },
12717                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12718                         0x00000000, 0xffffffff },
12719                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12720                         0x00000000, 0xffffffff },
12721                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12722                         0x00000000, 0xffffffff },
12723                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12724                         0x00000000, 0x000000ff },
12725                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12726                         0x00000000, 0xffffffff },
12727                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12728                         0x00000000, 0x000000ff },
12729                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12730                         0x00000000, 0xffffffff },
12731                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12732                         0x00000000, 0xffffffff },
12733                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12734                         0x00000000, 0xffffffff },
12735                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12736                         0x00000000, 0xffffffff },
12737                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12738                         0x00000000, 0xffffffff },
12739                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12740                         0xffffffff, 0x00000000 },
12741                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12742                         0xffffffff, 0x00000000 },
12743
12744                 /* Buffer Manager Control Registers. */
12745                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12746                         0x00000000, 0x007fff80 },
12747                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12748                         0x00000000, 0x007fffff },
12749                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12750                         0x00000000, 0x0000003f },
12751                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12752                         0x00000000, 0x000001ff },
12753                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12754                         0x00000000, 0x000001ff },
12755                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12756                         0xffffffff, 0x00000000 },
12757                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12758                         0xffffffff, 0x00000000 },
12759
12760                 /* Mailbox Registers */
12761                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12762                         0x00000000, 0x000001ff },
12763                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12764                         0x00000000, 0x000001ff },
12765                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12766                         0x00000000, 0x000007ff },
12767                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12768                         0x00000000, 0x000001ff },
12769
12770                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12771         };
12772
12773         is_5705 = is_5750 = 0;
12774         if (tg3_flag(tp, 5705_PLUS)) {
12775                 is_5705 = 1;
12776                 if (tg3_flag(tp, 5750_PLUS))
12777                         is_5750 = 1;
12778         }
12779
12780         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12781                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12782                         continue;
12783
12784                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12785                         continue;
12786
12787                 if (tg3_flag(tp, IS_5788) &&
12788                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12789                         continue;
12790
12791                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12792                         continue;
12793
12794                 offset = (u32) reg_tbl[i].offset;
12795                 read_mask = reg_tbl[i].read_mask;
12796                 write_mask = reg_tbl[i].write_mask;
12797
12798                 /* Save the original register content */
12799                 save_val = tr32(offset);
12800
12801                 /* Determine the read-only value. */
12802                 read_val = save_val & read_mask;
12803
12804                 /* Write zero to the register, then make sure the read-only bits
12805                  * are not changed and the read/write bits are all zeros.
12806                  */
12807                 tw32(offset, 0);
12808
12809                 val = tr32(offset);
12810
12811                 /* Test the read-only and read/write bits. */
12812                 if (((val & read_mask) != read_val) || (val & write_mask))
12813                         goto out;
12814
12815                 /* Write ones to all the bits defined by RdMask and WrMask, then
12816                  * make sure the read-only bits are not changed and the
12817                  * read/write bits are all ones.
12818                  */
12819                 tw32(offset, read_mask | write_mask);
12820
12821                 val = tr32(offset);
12822
12823                 /* Test the read-only bits. */
12824                 if ((val & read_mask) != read_val)
12825                         goto out;
12826
12827                 /* Test the read/write bits. */
12828                 if ((val & write_mask) != write_mask)
12829                         goto out;
12830
12831                 tw32(offset, save_val);
12832         }
12833
12834         return 0;
12835
12836 out:
12837         if (netif_msg_hw(tp))
12838                 netdev_err(tp->dev,
12839                            "Register test failed at offset %x\n", offset);
12840         tw32(offset, save_val);
12841         return -EIO;
12842 }
12843
12844 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12845 {
12846         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12847         int i;
12848         u32 j;
12849
12850         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12851                 for (j = 0; j < len; j += 4) {
12852                         u32 val;
12853
12854                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12855                         tg3_read_mem(tp, offset + j, &val);
12856                         if (val != test_pattern[i])
12857                                 return -EIO;
12858                 }
12859         }
12860         return 0;
12861 }
12862
12863 static int tg3_test_memory(struct tg3 *tp)
12864 {
12865         static struct mem_entry {
12866                 u32 offset;
12867                 u32 len;
12868         } mem_tbl_570x[] = {
12869                 { 0x00000000, 0x00b50},
12870                 { 0x00002000, 0x1c000},
12871                 { 0xffffffff, 0x00000}
12872         }, mem_tbl_5705[] = {
12873                 { 0x00000100, 0x0000c},
12874                 { 0x00000200, 0x00008},
12875                 { 0x00004000, 0x00800},
12876                 { 0x00006000, 0x01000},
12877                 { 0x00008000, 0x02000},
12878                 { 0x00010000, 0x0e000},
12879                 { 0xffffffff, 0x00000}
12880         }, mem_tbl_5755[] = {
12881                 { 0x00000200, 0x00008},
12882                 { 0x00004000, 0x00800},
12883                 { 0x00006000, 0x00800},
12884                 { 0x00008000, 0x02000},
12885                 { 0x00010000, 0x0c000},
12886                 { 0xffffffff, 0x00000}
12887         }, mem_tbl_5906[] = {
12888                 { 0x00000200, 0x00008},
12889                 { 0x00004000, 0x00400},
12890                 { 0x00006000, 0x00400},
12891                 { 0x00008000, 0x01000},
12892                 { 0x00010000, 0x01000},
12893                 { 0xffffffff, 0x00000}
12894         }, mem_tbl_5717[] = {
12895                 { 0x00000200, 0x00008},
12896                 { 0x00010000, 0x0a000},
12897                 { 0x00020000, 0x13c00},
12898                 { 0xffffffff, 0x00000}
12899         }, mem_tbl_57765[] = {
12900                 { 0x00000200, 0x00008},
12901                 { 0x00004000, 0x00800},
12902                 { 0x00006000, 0x09800},
12903                 { 0x00010000, 0x0a000},
12904                 { 0xffffffff, 0x00000}
12905         };
12906         struct mem_entry *mem_tbl;
12907         int err = 0;
12908         int i;
12909
12910         if (tg3_flag(tp, 5717_PLUS))
12911                 mem_tbl = mem_tbl_5717;
12912         else if (tg3_flag(tp, 57765_CLASS) ||
12913                  tg3_asic_rev(tp) == ASIC_REV_5762)
12914                 mem_tbl = mem_tbl_57765;
12915         else if (tg3_flag(tp, 5755_PLUS))
12916                 mem_tbl = mem_tbl_5755;
12917         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12918                 mem_tbl = mem_tbl_5906;
12919         else if (tg3_flag(tp, 5705_PLUS))
12920                 mem_tbl = mem_tbl_5705;
12921         else
12922                 mem_tbl = mem_tbl_570x;
12923
12924         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12925                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12926                 if (err)
12927                         break;
12928         }
12929
12930         return err;
12931 }
12932
/* Loopback-test TSO parameters.  The deliberately small MSS forces the
 * hardware to segment the single oversized test frame into several
 * packets, exercising the TSO path.
 */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Canned ethertype (0x0800 = IPv4) + IPv4 + TCP (with options) header
 * copied into the TSO loopback test frame after the two MAC addresses.
 * Addresses and ports are arbitrary; tg3_run_loopback() fills in the
 * IP total-length field and, for HW-TSO chips, clears the TCP checksum.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12955
12956 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12957 {
12958         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12959         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12960         u32 budget;
12961         struct sk_buff *skb;
12962         u8 *tx_data, *rx_data;
12963         dma_addr_t map;
12964         int num_pkts, tx_len, rx_len, i, err;
12965         struct tg3_rx_buffer_desc *desc;
12966         struct tg3_napi *tnapi, *rnapi;
12967         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12968
12969         tnapi = &tp->napi[0];
12970         rnapi = &tp->napi[0];
12971         if (tp->irq_cnt > 1) {
12972                 if (tg3_flag(tp, ENABLE_RSS))
12973                         rnapi = &tp->napi[1];
12974                 if (tg3_flag(tp, ENABLE_TSS))
12975                         tnapi = &tp->napi[1];
12976         }
12977         coal_now = tnapi->coal_now | rnapi->coal_now;
12978
12979         err = -EIO;
12980
12981         tx_len = pktsz;
12982         skb = netdev_alloc_skb(tp->dev, tx_len);
12983         if (!skb)
12984                 return -ENOMEM;
12985
12986         tx_data = skb_put(skb, tx_len);
12987         memcpy(tx_data, tp->dev->dev_addr, 6);
12988         memset(tx_data + 6, 0x0, 8);
12989
12990         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12991
12992         if (tso_loopback) {
12993                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12994
12995                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12996                               TG3_TSO_TCP_OPT_LEN;
12997
12998                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12999                        sizeof(tg3_tso_header));
13000                 mss = TG3_TSO_MSS;
13001
13002                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13003                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13004
13005                 /* Set the total length field in the IP header */
13006                 iph->tot_len = htons((u16)(mss + hdr_len));
13007
13008                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13009                               TXD_FLAG_CPU_POST_DMA);
13010
13011                 if (tg3_flag(tp, HW_TSO_1) ||
13012                     tg3_flag(tp, HW_TSO_2) ||
13013                     tg3_flag(tp, HW_TSO_3)) {
13014                         struct tcphdr *th;
13015                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13016                         th = (struct tcphdr *)&tx_data[val];
13017                         th->check = 0;
13018                 } else
13019                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13020
13021                 if (tg3_flag(tp, HW_TSO_3)) {
13022                         mss |= (hdr_len & 0xc) << 12;
13023                         if (hdr_len & 0x10)
13024                                 base_flags |= 0x00000010;
13025                         base_flags |= (hdr_len & 0x3e0) << 5;
13026                 } else if (tg3_flag(tp, HW_TSO_2))
13027                         mss |= hdr_len << 9;
13028                 else if (tg3_flag(tp, HW_TSO_1) ||
13029                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13030                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13031                 } else {
13032                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13033                 }
13034
13035                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13036         } else {
13037                 num_pkts = 1;
13038                 data_off = ETH_HLEN;
13039
13040                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13041                     tx_len > VLAN_ETH_FRAME_LEN)
13042                         base_flags |= TXD_FLAG_JMB_PKT;
13043         }
13044
13045         for (i = data_off; i < tx_len; i++)
13046                 tx_data[i] = (u8) (i & 0xff);
13047
13048         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13049         if (pci_dma_mapping_error(tp->pdev, map)) {
13050                 dev_kfree_skb(skb);
13051                 return -EIO;
13052         }
13053
13054         val = tnapi->tx_prod;
13055         tnapi->tx_buffers[val].skb = skb;
13056         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13057
13058         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13059                rnapi->coal_now);
13060
13061         udelay(10);
13062
13063         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13064
13065         budget = tg3_tx_avail(tnapi);
13066         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13067                             base_flags | TXD_FLAG_END, mss, 0)) {
13068                 tnapi->tx_buffers[val].skb = NULL;
13069                 dev_kfree_skb(skb);
13070                 return -EIO;
13071         }
13072
13073         tnapi->tx_prod++;
13074
13075         /* Sync BD data before updating mailbox */
13076         wmb();
13077
13078         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13079         tr32_mailbox(tnapi->prodmbox);
13080
13081         udelay(10);
13082
13083         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13084         for (i = 0; i < 35; i++) {
13085                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13086                        coal_now);
13087
13088                 udelay(10);
13089
13090                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13091                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13092                 if ((tx_idx == tnapi->tx_prod) &&
13093                     (rx_idx == (rx_start_idx + num_pkts)))
13094                         break;
13095         }
13096
13097         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13098         dev_kfree_skb(skb);
13099
13100         if (tx_idx != tnapi->tx_prod)
13101                 goto out;
13102
13103         if (rx_idx != rx_start_idx + num_pkts)
13104                 goto out;
13105
13106         val = data_off;
13107         while (rx_idx != rx_start_idx) {
13108                 desc = &rnapi->rx_rcb[rx_start_idx++];
13109                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13110                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13111
13112                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13113                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13114                         goto out;
13115
13116                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13117                          - ETH_FCS_LEN;
13118
13119                 if (!tso_loopback) {
13120                         if (rx_len != tx_len)
13121                                 goto out;
13122
13123                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13124                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13125                                         goto out;
13126                         } else {
13127                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13128                                         goto out;
13129                         }
13130                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13131                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13132                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13133                         goto out;
13134                 }
13135
13136                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13137                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13138                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13139                                              mapping);
13140                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13141                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13142                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13143                                              mapping);
13144                 } else
13145                         goto out;
13146
13147                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13148                                             PCI_DMA_FROMDEVICE);
13149
13150                 rx_data += TG3_RX_OFFSET(tp);
13151                 for (i = data_off; i < rx_len; i++, val++) {
13152                         if (*(rx_data + i) != (u8) (val & 0xff))
13153                                 goto out;
13154                 }
13155         }
13156
13157         err = 0;
13158
13159         /* tg3_free_rings will unmap and free the rx_data */
13160 out:
13161         return err;
13162 }
13163
/* Per-variant failure bits OR'ed into the ethtool loopback results
 * (one bit each for standard, jumbo, and TSO frames); the combined
 * mask marks a loopback test that could not run at all.
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
13171
/* Run the MAC, PHY, and (optionally) external loopback self-tests.
 *
 * TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits are accumulated into
 * data[TG3_MAC_LOOPB_TEST], data[TG3_PHY_LOOPB_TEST] and, when
 * @do_extlpbk is set, data[TG3_EXT_LOOPB_TEST].  If the device is not
 * running or the hardware reset fails, all attempted slots are set to
 * the full TG3_LOOPBACK_FAILED mask.
 *
 * Returns 0 when every attempted loopback passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to the device's DMA limit, if any. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily clear the EEE capability flag for the duration of
	 * the tests; it is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY (and external) loopback only for real copper PHYs driven
	 * by this driver, not serdes or phylib-managed devices.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13286
/* ethtool self-test entry point.  Runs NVRAM, link, register, memory,
 * loopback, and interrupt tests; sets ETH_TEST_FL_FAILED plus the
 * corresponding data[] slot for each failure.  Offline tests halt the
 * chip and restart it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake a powered-down chip first; if that fails, report every
	 * test as failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Link test is skipped for external loopback runs. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * (570x-class parts have a separate TX CPU).
		 */
		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs with the full lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back up if the interface is open. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13373
13374 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13375                               struct ifreq *ifr, int cmd)
13376 {
13377         struct tg3 *tp = netdev_priv(dev);
13378         struct hwtstamp_config stmpconf;
13379
13380         if (!tg3_flag(tp, PTP_CAPABLE))
13381                 return -EINVAL;
13382
13383         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13384                 return -EFAULT;
13385
13386         if (stmpconf.flags)
13387                 return -EINVAL;
13388
13389         switch (stmpconf.tx_type) {
13390         case HWTSTAMP_TX_ON:
13391                 tg3_flag_set(tp, TX_TSTAMP_EN);
13392                 break;
13393         case HWTSTAMP_TX_OFF:
13394                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13395                 break;
13396         default:
13397                 return -ERANGE;
13398         }
13399
13400         switch (stmpconf.rx_filter) {
13401         case HWTSTAMP_FILTER_NONE:
13402                 tp->rxptpctl = 0;
13403                 break;
13404         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13405                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13406                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13407                 break;
13408         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13409                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13410                                TG3_RX_PTP_CTL_SYNC_EVNT;
13411                 break;
13412         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13413                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13414                                TG3_RX_PTP_CTL_DELAY_REQ;
13415                 break;
13416         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13417                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13418                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13419                 break;
13420         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13421                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13422                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13423                 break;
13424         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13425                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13426                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13427                 break;
13428         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13429                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13430                                TG3_RX_PTP_CTL_SYNC_EVNT;
13431                 break;
13432         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13433                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13434                                TG3_RX_PTP_CTL_SYNC_EVNT;
13435                 break;
13436         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13437                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13438                                TG3_RX_PTP_CTL_SYNC_EVNT;
13439                 break;
13440         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13441                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13442                                TG3_RX_PTP_CTL_DELAY_REQ;
13443                 break;
13444         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13445                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13446                                TG3_RX_PTP_CTL_DELAY_REQ;
13447                 break;
13448         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13449                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13450                                TG3_RX_PTP_CTL_DELAY_REQ;
13451                 break;
13452         default:
13453                 return -ERANGE;
13454         }
13455
13456         if (netif_running(dev) && tp->rxptpctl)
13457                 tw32(TG3_RX_PTP_CTL,
13458                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13459
13460         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13461                 -EFAULT : 0;
13462 }
13463
13464 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13465 {
13466         struct mii_ioctl_data *data = if_mii(ifr);
13467         struct tg3 *tp = netdev_priv(dev);
13468         int err;
13469
13470         if (tg3_flag(tp, USE_PHYLIB)) {
13471                 struct phy_device *phydev;
13472                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13473                         return -EAGAIN;
13474                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13475                 return phy_mii_ioctl(phydev, ifr, cmd);
13476         }
13477
13478         switch (cmd) {
13479         case SIOCGMIIPHY:
13480                 data->phy_id = tp->phy_addr;
13481
13482                 /* fallthru */
13483         case SIOCGMIIREG: {
13484                 u32 mii_regval;
13485
13486                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13487                         break;                  /* We have no PHY */
13488
13489                 if (!netif_running(dev))
13490                         return -EAGAIN;
13491
13492                 spin_lock_bh(&tp->lock);
13493                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13494                                     data->reg_num & 0x1f, &mii_regval);
13495                 spin_unlock_bh(&tp->lock);
13496
13497                 data->val_out = mii_regval;
13498
13499                 return err;
13500         }
13501
13502         case SIOCSMIIREG:
13503                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13504                         break;                  /* We have no PHY */
13505
13506                 if (!netif_running(dev))
13507                         return -EAGAIN;
13508
13509                 spin_lock_bh(&tp->lock);
13510                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13511                                      data->reg_num & 0x1f, data->val_in);
13512                 spin_unlock_bh(&tp->lock);
13513
13514                 return err;
13515
13516         case SIOCSHWTSTAMP:
13517                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13518
13519         default:
13520                 /* do nothing */
13521                 break;
13522         }
13523         return -EOPNOTSUPP;
13524 }
13525
13526 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13527 {
13528         struct tg3 *tp = netdev_priv(dev);
13529
13530         memcpy(ec, &tp->coal, sizeof(*ec));
13531         return 0;
13532 }
13533
13534 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13535 {
13536         struct tg3 *tp = netdev_priv(dev);
13537         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13538         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13539
13540         if (!tg3_flag(tp, 5705_PLUS)) {
13541                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13542                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13543                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13544                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13545         }
13546
13547         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13548             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13549             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13550             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13551             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13552             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13553             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13554             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13555             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13556             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13557                 return -EINVAL;
13558
13559         /* No rx interrupts will be generated if both are zero */
13560         if ((ec->rx_coalesce_usecs == 0) &&
13561             (ec->rx_max_coalesced_frames == 0))
13562                 return -EINVAL;
13563
13564         /* No tx interrupts will be generated if both are zero */
13565         if ((ec->tx_coalesce_usecs == 0) &&
13566             (ec->tx_max_coalesced_frames == 0))
13567                 return -EINVAL;
13568
13569         /* Only copy relevant parameters, ignore all others. */
13570         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13571         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13572         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13573         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13574         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13575         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13576         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13577         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13578         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13579
13580         if (netif_running(dev)) {
13581                 tg3_full_lock(tp, 0);
13582                 __tg3_set_coalesce(tp, &tp->coal);
13583                 tg3_full_unlock(tp);
13584         }
13585         return 0;
13586 }
13587
/* ethtool operations exported for tg3 network devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13622
13623 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13624                                                 struct rtnl_link_stats64 *stats)
13625 {
13626         struct tg3 *tp = netdev_priv(dev);
13627
13628         spin_lock_bh(&tp->lock);
13629         if (!tp->hw_stats) {
13630                 spin_unlock_bh(&tp->lock);
13631                 return &tp->net_stats_prev;
13632         }
13633
13634         tg3_get_nstats(tp, stats);
13635         spin_unlock_bh(&tp->lock);
13636
13637         return stats;
13638 }
13639
/* ndo_set_rx_mode handler: reprogram rx filtering under the full lock.
 * While the device is down there is nothing to program; open() will
 * apply the current mode.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13651
13652 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13653                                int new_mtu)
13654 {
13655         dev->mtu = new_mtu;
13656
13657         if (new_mtu > ETH_DATA_LEN) {
13658                 if (tg3_flag(tp, 5780_CLASS)) {
13659                         netdev_update_features(dev);
13660                         tg3_flag_clear(tp, TSO_CAPABLE);
13661                 } else {
13662                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13663                 }
13664         } else {
13665                 if (tg3_flag(tp, 5780_CLASS)) {
13666                         tg3_flag_set(tp, TSO_CAPABLE);
13667                         netdev_update_features(dev);
13668                 }
13669                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13670         }
13671 }
13672
/* ndo_change_mtu handler: validate the new MTU and apply it.  If the
 * interface is up, the chip must be halted and restarted so the rx
 * rings can be resized.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce the PHY and stop traffic before the chip reset. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only once the hardware came back up cleanly. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13718
/* Network device operations for tg3 interfaces. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13736
13737 static void tg3_get_eeprom_size(struct tg3 *tp)
13738 {
13739         u32 cursize, val, magic;
13740
13741         tp->nvram_size = EEPROM_CHIP_SIZE;
13742
13743         if (tg3_nvram_read(tp, 0, &magic) != 0)
13744                 return;
13745
13746         if ((magic != TG3_EEPROM_MAGIC) &&
13747             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13748             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13749                 return;
13750
13751         /*
13752          * Size the chip by reading offsets at increasing powers of two.
13753          * When we encounter our validation signature, we know the addressing
13754          * has wrapped around, and thus have our chip size.
13755          */
13756         cursize = 0x10;
13757
13758         while (cursize < tp->nvram_size) {
13759                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13760                         return;
13761
13762                 if (val == magic)
13763                         break;
13764
13765                 cursize <<= 1;
13766         }
13767
13768         tp->nvram_size = cursize;
13769 }
13770
13771 static void tg3_get_nvram_size(struct tg3 *tp)
13772 {
13773         u32 val;
13774
13775         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13776                 return;
13777
13778         /* Selfboot format */
13779         if (val != TG3_EEPROM_MAGIC) {
13780                 tg3_get_eeprom_size(tp);
13781                 return;
13782         }
13783
13784         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13785                 if (val != 0) {
13786                         /* This is confusing.  We want to operate on the
13787                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13788                          * call will read from NVRAM and byteswap the data
13789                          * according to the byteswapping settings for all
13790                          * other register accesses.  This ensures the data we
13791                          * want will always reside in the lower 16-bits.
13792                          * However, the data in NVRAM is in LE format, which
13793                          * means the data from the NVRAM read will always be
13794                          * opposite the endianness of the CPU.  The 16-bit
13795                          * byteswap then brings the data to CPU endianness.
13796                          */
13797                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13798                         return;
13799                 }
13800         }
13801         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13802 }
13803
/* Decode NVRAM_CFG1 on legacy (pre-5752) devices: record whether a
 * flash interface is present, and for 5750/5780-class chips translate
 * the vendor strapping into JEDEC id, page size, and buffering.  Other
 * chips default to a buffered Atmel AT45DB0x1B part.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: force EEPROM-compatible access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13854
13855 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13856 {
13857         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13858         case FLASH_5752PAGE_SIZE_256:
13859                 tp->nvram_pagesize = 256;
13860                 break;
13861         case FLASH_5752PAGE_SIZE_512:
13862                 tp->nvram_pagesize = 512;
13863                 break;
13864         case FLASH_5752PAGE_SIZE_1K:
13865                 tp->nvram_pagesize = 1024;
13866                 break;
13867         case FLASH_5752PAGE_SIZE_2K:
13868                 tp->nvram_pagesize = 2048;
13869                 break;
13870         case FLASH_5752PAGE_SIZE_4K:
13871                 tp->nvram_pagesize = 4096;
13872                 break;
13873         case FLASH_5752PAGE_SIZE_264:
13874                 tp->nvram_pagesize = 264;
13875                 break;
13876         case FLASH_5752PAGE_SIZE_528:
13877                 tp->nvram_pagesize = 528;
13878                 break;
13879         }
13880 }
13881
/* Decode NVRAM_CFG1 on 5752 devices: TPM protection bit, vendor
 * strapping, and page size.  EEPROM parts get the maximum chip size as
 * page size and have compat bypass disabled.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13922
/* Decode NVRAM_CFG1 on 5755 devices.  When TPM protection is strapped,
 * the usable NVRAM size is smaller than the physical part, hence the
 * protect-dependent size selection below.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13978
/* Decode NVRAM_CFG1 on 5787 devices: vendor strapping selects either an
 * EEPROM (compat bypass cleared, chip-sized pages) or an Atmel/ST flash
 * part with a fixed page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14016
/* Decode NVRAM_CFG1 on 5761 devices.  When TPM protection is strapped,
 * the usable size is read from the NVRAM_ADDR_LOCKOUT register instead
 * of being derived from the vendor strapping.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected parts report their usable size here. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14091
14092 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14093 {
14094         tp->nvram_jedecnum = JEDEC_ATMEL;
14095         tg3_flag_set(tp, NVRAM_BUFFERED);
14096         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14097 }
14098
/* Decode NVRAM_CFG1 on 57780 devices: EEPROM straps return early after
 * clearing compat bypass; flash straps select vendor and size, then the
 * page size is read and address translation disabled for non-Atmel
 * (264/528-byte) page layouts.  Unrecognized straps mean no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested switch maps the specific Atmel part to its size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14170
14171
/* Decode NVRAM_CFG1 on 5717 devices: EEPROM straps return early after
 * clearing compat bypass; Atmel/ST flash straps set vendor and size
 * (some straps leave the size to be probed by tg3_nvram_get_size()),
 * then page size is read and address translation disabled for
 * non-264/528-byte page layouts.  Unrecognized straps mean no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14249
/* Decode the NVRAM pin straps of 5720/5762-class chips and fill in
 * tp->nvram_jedecnum, the BUFFERED/FLASH/NO_NVRAM flags, the page size
 * and, where the strap encodes it, the total NVRAM size.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Translate 5762-specific straps to their 5720
		 * equivalents so the common switch below covers both.
		 */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Serial EEPROM (not flash): disable compatibility bypass
		 * and record the chip size as the "page" size.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel flash parts: size is encoded in the strap. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 the size is read from NVRAM later
			 * instead of being assumed here.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST flash parts: same scheme as the Atmel block above. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* 5762: treat NVRAM without a recognizable image magic
		 * as absent.
		 */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14396
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and restore the default
	 * clock period before any access is attempted.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM access is arbitrated against firmware via a
		 * hardware lock; give up on this device if it cannot
		 * be taken.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the chip-specific NVRAM detection helper. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Size not determined by the straps above; probe it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701 have only the legacy serial EEPROM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14471
/* Maps a PCI subsystem vendor/device pair to the PHY ID known to be on
 * that board; used when the PHY cannot be identified any other way.
 * A phy_id of 0 marks boards with no copper PHY (serdes).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14476
/* Hardcoded board table consulted by tg3_lookup_by_subsys() when the
 * device carries no usable EEPROM signature.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14540
14541 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14542 {
14543         int i;
14544
14545         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14546                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14547                      tp->pdev->subsystem_vendor) &&
14548                     (subsys_id_to_phy_id[i].subsys_devid ==
14549                      tp->pdev->subsystem_device))
14550                         return &subsys_id_to_phy_id[i];
14551         }
14552         return NULL;
14553 }
14554
/* Pull the board configuration out of NIC SRAM (as populated by the
 * bootcode from EEPROM/NVRAM) and translate it into PHY ID, LED mode,
 * WOL/ASF/APE capability flags and related settings.  Falls back to
 * safe defaults when no valid SRAM signature is present.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config through VCPU shadow registers
		 * instead of NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only valid on newer ASICs with a sane
		 * bootcode version (0 < ver < 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM halves into
		 * the driver's internal TG3_PHY_ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 cannot drive the extra PHY LEDs. */
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the SRAM setting. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Known Arima boards misreport write protect. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards only support WOL when the bootcode says
		 * so.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14756
/* Read one 32-bit word from the APE OTP region into *val.
 *
 * @offset: word index within the OTP region; converted to the hardware
 *          address via * 8 (assumes OTP rows are 8 bytes apart —
 *          TODO confirm against the APE OTP layout).
 *
 * Takes the NVRAM hardware lock for the duration of the access.
 * Returns 0 on success, -EBUSY if the command never completed, or the
 * error from tg3_nvram_lock().
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Kick off a read command and flush it with a readback. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion, ~1ms worst case (100 * 10us). */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14789
14790 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14791 {
14792         int i;
14793         u32 val;
14794
14795         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14796         tw32(OTP_CTRL, cmd);
14797
14798         /* Wait for up to 1 ms for command to execute. */
14799         for (i = 0; i < 100; i++) {
14800                 val = tr32(OTP_STATUS);
14801                 if (val & OTP_STATUS_CMD_DONE)
14802                         break;
14803                 udelay(10);
14804         }
14805
14806         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14807 }
14808
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails (0 is also a possible, though
 * degenerate, valid configuration value).
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP access through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word becomes the high half of the
	 * result; high half of the second word becomes the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14838
14839 static void tg3_phy_init_link_config(struct tg3 *tp)
14840 {
14841         u32 adv = ADVERTISED_Autoneg;
14842
14843         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14844                 adv |= ADVERTISED_1000baseT_Half |
14845                        ADVERTISED_1000baseT_Full;
14846
14847         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14848                 adv |= ADVERTISED_100baseT_Half |
14849                        ADVERTISED_100baseT_Full |
14850                        ADVERTISED_10baseT_Half |
14851                        ADVERTISED_10baseT_Full |
14852                        ADVERTISED_TP;
14853         else
14854                 adv |= ADVERTISED_FIBRE;
14855
14856         tp->link_config.advertising = adv;
14857         tp->link_config.speed = SPEED_UNKNOWN;
14858         tp->link_config.duplex = DUPLEX_UNKNOWN;
14859         tp->link_config.autoneg = AUTONEG_ENABLE;
14860         tp->link_config.active_speed = SPEED_UNKNOWN;
14861         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14862
14863         tp->old_link = -1;
14864 }
14865
14866 static int tg3_phy_probe(struct tg3 *tp)
14867 {
14868         u32 hw_phy_id_1, hw_phy_id_2;
14869         u32 hw_phy_id, hw_phy_id_masked;
14870         int err;
14871
14872         /* flow control autonegotiation is default behavior */
14873         tg3_flag_set(tp, PAUSE_AUTONEG);
14874         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14875
14876         if (tg3_flag(tp, ENABLE_APE)) {
14877                 switch (tp->pci_fn) {
14878                 case 0:
14879                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14880                         break;
14881                 case 1:
14882                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14883                         break;
14884                 case 2:
14885                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14886                         break;
14887                 case 3:
14888                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14889                         break;
14890                 }
14891         }
14892
14893         if (!tg3_flag(tp, ENABLE_ASF) &&
14894             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14895             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14896                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14897                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14898
14899         if (tg3_flag(tp, USE_PHYLIB))
14900                 return tg3_phy_init(tp);
14901
14902         /* Reading the PHY ID register can conflict with ASF
14903          * firmware access to the PHY hardware.
14904          */
14905         err = 0;
14906         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14907                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14908         } else {
14909                 /* Now read the physical PHY_ID from the chip and verify
14910                  * that it is sane.  If it doesn't look good, we fall back
14911                  * to either the hard-coded table based PHY_ID and failing
14912                  * that the value found in the eeprom area.
14913                  */
14914                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14915                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14916
14917                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14918                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14919                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14920
14921                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14922         }
14923
14924         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14925                 tp->phy_id = hw_phy_id;
14926                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14927                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14928                 else
14929                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14930         } else {
14931                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14932                         /* Do nothing, phy ID already set up in
14933                          * tg3_get_eeprom_hw_cfg().
14934                          */
14935                 } else {
14936                         struct subsys_tbl_ent *p;
14937
14938                         /* No eeprom signature?  Try the hardcoded
14939                          * subsys device table.
14940                          */
14941                         p = tg3_lookup_by_subsys(tp);
14942                         if (p) {
14943                                 tp->phy_id = p->phy_id;
14944                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14945                                 /* For now we saw the IDs 0xbc050cd0,
14946                                  * 0xbc050f80 and 0xbc050c30 on devices
14947                                  * connected to an BCM4785 and there are
14948                                  * probably more. Just assume that the phy is
14949                                  * supported when it is connected to a SSB core
14950                                  * for now.
14951                                  */
14952                                 return -ENODEV;
14953                         }
14954
14955                         if (!tp->phy_id ||
14956                             tp->phy_id == TG3_PHY_ID_BCM8002)
14957                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14958                 }
14959         }
14960
14961         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14962             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14963              tg3_asic_rev(tp) == ASIC_REV_5720 ||
14964              tg3_asic_rev(tp) == ASIC_REV_57766 ||
14965              tg3_asic_rev(tp) == ASIC_REV_5762 ||
14966              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14967               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14968              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14969               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14970                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14971
14972         tg3_phy_init_link_config(tp);
14973
14974         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14975             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14976             !tg3_flag(tp, ENABLE_APE) &&
14977             !tg3_flag(tp, ENABLE_ASF)) {
14978                 u32 bmsr, dummy;
14979
14980                 tg3_readphy(tp, MII_BMSR, &bmsr);
14981                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14982                     (bmsr & BMSR_LSTATUS))
14983                         goto skip_phy_reset;
14984
14985                 err = tg3_phy_reset(tp);
14986                 if (err)
14987                         return err;
14988
14989                 tg3_phy_set_wirespeed(tp);
14990
14991                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14992                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14993                                             tp->link_config.flowctrl);
14994
14995                         tg3_writephy(tp, MII_BMCR,
14996                                      BMCR_ANENABLE | BMCR_ANRESTART);
14997                 }
14998         }
14999
15000 skip_phy_reset:
15001         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15002                 err = tg3_init_5401phy_dsp(tp);
15003                 if (err)
15004                         return err;
15005
15006                 err = tg3_init_5401phy_dsp(tp);
15007         }
15008
15009         return err;
15010 }
15011
/* Parse the adapter's PCI Vital Product Data (VPD) to fill in
 * tp->board_part_number and, on Dell OEM boards, seed tp->fw_ver with
 * the vendor firmware string.  If no usable VPD is found, fall back to
 * a board name derived from the PCI device ID.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* tg3_vpd_readblock() allocates the raw VPD image; freed at
	 * out_not_found below.
	 */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only large-resource data block. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Reject a RO block that claims to extend past the VPD image. */
	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		/* "1028" is Dell's PCI vendor ID in ASCII; only such
		 * boards carry the V0 vendor-specific firmware string.
		 */
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		/* Seed fw_ver with the VPD string; the bootcode version
		 * readers append after the " bc " separator.
		 */
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No part number in VPD: synthesize one from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		/* Label lives inside the final else; earlier branches
		 * jump here when the device ID is unrecognized.
		 */
		strcpy(tp->board_part_number, "none");
	}
}
15135
15136 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15137 {
15138         u32 val;
15139
15140         if (tg3_nvram_read(tp, offset, &val) ||
15141             (val & 0xfc000000) != 0x0c000000 ||
15142             tg3_nvram_read(tp, offset + 4, &val) ||
15143             val != 0)
15144                 return 0;
15145
15146         return 1;
15147 }
15148
15149 static void tg3_read_bc_ver(struct tg3 *tp)
15150 {
15151         u32 val, offset, start, ver_offset;
15152         int i, dst_off;
15153         bool newver = false;
15154
15155         if (tg3_nvram_read(tp, 0xc, &offset) ||
15156             tg3_nvram_read(tp, 0x4, &start))
15157                 return;
15158
15159         offset = tg3_nvram_logical_addr(tp, offset);
15160
15161         if (tg3_nvram_read(tp, offset, &val))
15162                 return;
15163
15164         if ((val & 0xfc000000) == 0x0c000000) {
15165                 if (tg3_nvram_read(tp, offset + 4, &val))
15166                         return;
15167
15168                 if (val == 0)
15169                         newver = true;
15170         }
15171
15172         dst_off = strlen(tp->fw_ver);
15173
15174         if (newver) {
15175                 if (TG3_VER_SIZE - dst_off < 16 ||
15176                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15177                         return;
15178
15179                 offset = offset + ver_offset - start;
15180                 for (i = 0; i < 16; i += 4) {
15181                         __be32 v;
15182                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15183                                 return;
15184
15185                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15186                 }
15187         } else {
15188                 u32 major, minor;
15189
15190                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15191                         return;
15192
15193                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15194                         TG3_NVM_BCVER_MAJSFT;
15195                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15196                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15197                          "v%d.%02d", major, minor);
15198         }
15199 }
15200
15201 static void tg3_read_hwsb_ver(struct tg3 *tp)
15202 {
15203         u32 val, major, minor;
15204
15205         /* Use native endian representation */
15206         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15207                 return;
15208
15209         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15210                 TG3_NVM_HWSB_CFG1_MAJSFT;
15211         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15212                 TG3_NVM_HWSB_CFG1_MINSFT;
15213
15214         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15215 }
15216
15217 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15218 {
15219         u32 offset, major, minor, build;
15220
15221         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15222
15223         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15224                 return;
15225
15226         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15227         case TG3_EEPROM_SB_REVISION_0:
15228                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15229                 break;
15230         case TG3_EEPROM_SB_REVISION_2:
15231                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15232                 break;
15233         case TG3_EEPROM_SB_REVISION_3:
15234                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15235                 break;
15236         case TG3_EEPROM_SB_REVISION_4:
15237                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15238                 break;
15239         case TG3_EEPROM_SB_REVISION_5:
15240                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15241                 break;
15242         case TG3_EEPROM_SB_REVISION_6:
15243                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15244                 break;
15245         default:
15246                 return;
15247         }
15248
15249         if (tg3_nvram_read(tp, offset, &val))
15250                 return;
15251
15252         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15253                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15254         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15255                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15256         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15257
15258         if (minor > 99 || build > 26)
15259                 return;
15260
15261         offset = strlen(tp->fw_ver);
15262         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15263                  " v%d.%02d", major, minor);
15264
15265         if (build > 0) {
15266                 offset = strlen(tp->fw_ver);
15267                 if (offset < TG3_VER_SIZE - 1)
15268                         tp->fw_ver[offset] = 'a' + build - 1;
15269         }
15270 }
15271
15272 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15273 {
15274         u32 val, offset, start;
15275         int i, vlen;
15276
15277         for (offset = TG3_NVM_DIR_START;
15278              offset < TG3_NVM_DIR_END;
15279              offset += TG3_NVM_DIRENT_SIZE) {
15280                 if (tg3_nvram_read(tp, offset, &val))
15281                         return;
15282
15283                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15284                         break;
15285         }
15286
15287         if (offset == TG3_NVM_DIR_END)
15288                 return;
15289
15290         if (!tg3_flag(tp, 5705_PLUS))
15291                 start = 0x08000000;
15292         else if (tg3_nvram_read(tp, offset - 4, &start))
15293                 return;
15294
15295         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15296             !tg3_fw_img_is_valid(tp, offset) ||
15297             tg3_nvram_read(tp, offset + 8, &val))
15298                 return;
15299
15300         offset += val - start;
15301
15302         vlen = strlen(tp->fw_ver);
15303
15304         tp->fw_ver[vlen++] = ',';
15305         tp->fw_ver[vlen++] = ' ';
15306
15307         for (i = 0; i < 4; i++) {
15308                 __be32 v;
15309                 if (tg3_nvram_read_be32(tp, offset, &v))
15310                         return;
15311
15312                 offset += sizeof(v);
15313
15314                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15315                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15316                         break;
15317                 }
15318
15319                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15320                 vlen += sizeof(v);
15321         }
15322 }
15323
15324 static void tg3_probe_ncsi(struct tg3 *tp)
15325 {
15326         u32 apedata;
15327
15328         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15329         if (apedata != APE_SEG_SIG_MAGIC)
15330                 return;
15331
15332         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15333         if (!(apedata & APE_FW_STATUS_READY))
15334                 return;
15335
15336         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15337                 tg3_flag_set(tp, APE_HAS_NCSI);
15338 }
15339
15340 static void tg3_read_dash_ver(struct tg3 *tp)
15341 {
15342         int vlen;
15343         u32 apedata;
15344         char *fwtype;
15345
15346         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15347
15348         if (tg3_flag(tp, APE_HAS_NCSI))
15349                 fwtype = "NCSI";
15350         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15351                 fwtype = "SMASH";
15352         else
15353                 fwtype = "DASH";
15354
15355         vlen = strlen(tp->fw_ver);
15356
15357         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15358                  fwtype,
15359                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15360                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15361                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15362                  (apedata & APE_FW_VERSION_BLDMSK));
15363 }
15364
15365 static void tg3_read_otp_ver(struct tg3 *tp)
15366 {
15367         u32 val, val2;
15368
15369         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15370                 return;
15371
15372         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15373             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15374             TG3_OTP_MAGIC0_VALID(val)) {
15375                 u64 val64 = (u64) val << 32 | val2;
15376                 u32 ver = 0;
15377                 int i, vlen;
15378
15379                 for (i = 0; i < 7; i++) {
15380                         if ((val64 & 0xff) == 0)
15381                                 break;
15382                         ver = val64 & 0xff;
15383                         val64 >>= 8;
15384                 }
15385                 vlen = strlen(tp->fw_ver);
15386                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15387         }
15388 }
15389
15390 static void tg3_read_fw_ver(struct tg3 *tp)
15391 {
15392         u32 val;
15393         bool vpd_vers = false;
15394
15395         if (tp->fw_ver[0] != 0)
15396                 vpd_vers = true;
15397
15398         if (tg3_flag(tp, NO_NVRAM)) {
15399                 strcat(tp->fw_ver, "sb");
15400                 tg3_read_otp_ver(tp);
15401                 return;
15402         }
15403
15404         if (tg3_nvram_read(tp, 0, &val))
15405                 return;
15406
15407         if (val == TG3_EEPROM_MAGIC)
15408                 tg3_read_bc_ver(tp);
15409         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15410                 tg3_read_sb_ver(tp, val);
15411         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15412                 tg3_read_hwsb_ver(tp);
15413
15414         if (tg3_flag(tp, ENABLE_ASF)) {
15415                 if (tg3_flag(tp, ENABLE_APE)) {
15416                         tg3_probe_ncsi(tp);
15417                         if (!vpd_vers)
15418                                 tg3_read_dash_ver(tp);
15419                 } else if (!vpd_vers) {
15420                         tg3_read_mgmtfw_ver(tp);
15421                 }
15422         }
15423
15424         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15425 }
15426
15427 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15428 {
15429         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15430                 return TG3_RX_RET_MAX_SIZE_5717;
15431         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15432                 return TG3_RX_RET_MAX_SIZE_5700;
15433         else
15434                 return TG3_RX_RET_MAX_SIZE_5705;
15435 }
15436
15437 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15438         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15439         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15440         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15441         { },
15442 };
15443
/* Locate the other PCI function of a dual-port device (e.g. 5704).
 * Returns tp->pdev itself when configured single-port.  The returned
 * pointer carries no extra reference: both functions share one
 * lifetime, so the peer cannot be removed independently.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan all eight functions of our slot for a device other than
	 * ourselves; pci_get_slot() takes a reference, dropped here for
	 * every non-match.  NOTE(review): if the loop exhausts with the
	 * last probed slot being tp->pdev itself, peer exits non-NULL
	 * after its reference was already dropped and is put again
	 * below — presumably no supported board has its only sibling
	 * at function 7; confirm.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15471
15472 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15473 {
15474         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15475         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15476                 u32 reg;
15477
15478                 /* All devices that use the alternate
15479                  * ASIC REV location have a CPMU.
15480                  */
15481                 tg3_flag_set(tp, CPMU_PRESENT);
15482
15483                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15484                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15485                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15486                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15487                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15488                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15489                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15490                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15491                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15492                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15493                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15494                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15495                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15496                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15497                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15498                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15499                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15500                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15501                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15502                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15503                 else
15504                         reg = TG3PCI_PRODID_ASICREV;
15505
15506                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15507         }
15508
15509         /* Wrong chip ID in 5752 A0. This code can be removed later
15510          * as A0 is not in production.
15511          */
15512         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15513                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15514
15515         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15516                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15517
15518         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15519             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15520             tg3_asic_rev(tp) == ASIC_REV_5720)
15521                 tg3_flag_set(tp, 5717_PLUS);
15522
15523         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15524             tg3_asic_rev(tp) == ASIC_REV_57766)
15525                 tg3_flag_set(tp, 57765_CLASS);
15526
15527         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15528              tg3_asic_rev(tp) == ASIC_REV_5762)
15529                 tg3_flag_set(tp, 57765_PLUS);
15530
15531         /* Intentionally exclude ASIC_REV_5906 */
15532         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15533             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15534             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15535             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15536             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15537             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15538             tg3_flag(tp, 57765_PLUS))
15539                 tg3_flag_set(tp, 5755_PLUS);
15540
15541         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15542             tg3_asic_rev(tp) == ASIC_REV_5714)
15543                 tg3_flag_set(tp, 5780_CLASS);
15544
15545         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15546             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15547             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15548             tg3_flag(tp, 5755_PLUS) ||
15549             tg3_flag(tp, 5780_CLASS))
15550                 tg3_flag_set(tp, 5750_PLUS);
15551
15552         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15553             tg3_flag(tp, 5750_PLUS))
15554                 tg3_flag_set(tp, 5705_PLUS);
15555 }
15556
15557 static bool tg3_10_100_only_device(struct tg3 *tp,
15558                                    const struct pci_device_id *ent)
15559 {
15560         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15561
15562         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15563              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15564             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15565                 return true;
15566
15567         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15568                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15569                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15570                                 return true;
15571                 } else {
15572                         return true;
15573                 }
15574         }
15575
15576         return false;
15577 }
15578
15579 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15580 {
15581         u32 misc_ctrl_reg;
15582         u32 pci_state_reg, grc_misc_cfg;
15583         u32 val;
15584         u16 pci_cmd;
15585         int err;
15586
15587         /* Force memory write invalidate off.  If we leave it on,
15588          * then on 5700_BX chips we have to enable a workaround.
15589          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15590          * to match the cacheline size.  The Broadcom driver have this
15591          * workaround but turns MWI off all the times so never uses
15592          * it.  This seems to suggest that the workaround is insufficient.
15593          */
15594         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15595         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15596         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15597
15598         /* Important! -- Make sure register accesses are byteswapped
15599          * correctly.  Also, for those chips that require it, make
15600          * sure that indirect register accesses are enabled before
15601          * the first operation.
15602          */
15603         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15604                               &misc_ctrl_reg);
15605         tp->misc_host_ctrl |= (misc_ctrl_reg &
15606                                MISC_HOST_CTRL_CHIPREV);
15607         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15608                                tp->misc_host_ctrl);
15609
15610         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15611
15612         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15613          * we need to disable memory and use config. cycles
15614          * only to access all registers. The 5702/03 chips
15615          * can mistakenly decode the special cycles from the
15616          * ICH chipsets as memory write cycles, causing corruption
15617          * of register and memory space. Only certain ICH bridges
15618          * will drive special cycles with non-zero data during the
15619          * address phase which can fall within the 5703's address
15620          * range. This is not an ICH bug as the PCI spec allows
15621          * non-zero address during special cycles. However, only
15622          * these ICH bridges are known to drive non-zero addresses
15623          * during special cycles.
15624          *
15625          * Since special cycles do not cross PCI bridges, we only
15626          * enable this workaround if the 5703 is on the secondary
15627          * bus of these ICH bridges.
15628          */
15629         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15630             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15631                 static struct tg3_dev_id {
15632                         u32     vendor;
15633                         u32     device;
15634                         u32     rev;
15635                 } ich_chipsets[] = {
15636                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15637                           PCI_ANY_ID },
15638                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15639                           PCI_ANY_ID },
15640                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15641                           0xa },
15642                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15643                           PCI_ANY_ID },
15644                         { },
15645                 };
15646                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15647                 struct pci_dev *bridge = NULL;
15648
15649                 while (pci_id->vendor != 0) {
15650                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15651                                                 bridge);
15652                         if (!bridge) {
15653                                 pci_id++;
15654                                 continue;
15655                         }
15656                         if (pci_id->rev != PCI_ANY_ID) {
15657                                 if (bridge->revision > pci_id->rev)
15658                                         continue;
15659                         }
15660                         if (bridge->subordinate &&
15661                             (bridge->subordinate->number ==
15662                              tp->pdev->bus->number)) {
15663                                 tg3_flag_set(tp, ICH_WORKAROUND);
15664                                 pci_dev_put(bridge);
15665                                 break;
15666                         }
15667                 }
15668         }
15669
15670         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15671                 static struct tg3_dev_id {
15672                         u32     vendor;
15673                         u32     device;
15674                 } bridge_chipsets[] = {
15675                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15676                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15677                         { },
15678                 };
15679                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15680                 struct pci_dev *bridge = NULL;
15681
15682                 while (pci_id->vendor != 0) {
15683                         bridge = pci_get_device(pci_id->vendor,
15684                                                 pci_id->device,
15685                                                 bridge);
15686                         if (!bridge) {
15687                                 pci_id++;
15688                                 continue;
15689                         }
15690                         if (bridge->subordinate &&
15691                             (bridge->subordinate->number <=
15692                              tp->pdev->bus->number) &&
15693                             (bridge->subordinate->busn_res.end >=
15694                              tp->pdev->bus->number)) {
15695                                 tg3_flag_set(tp, 5701_DMA_BUG);
15696                                 pci_dev_put(bridge);
15697                                 break;
15698                         }
15699                 }
15700         }
15701
15702         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15703          * DMA addresses > 40-bit. This bridge may have other additional
15704          * 57xx devices behind it in some 4-port NIC designs for example.
15705          * Any tg3 device found behind the bridge will also need the 40-bit
15706          * DMA workaround.
15707          */
15708         if (tg3_flag(tp, 5780_CLASS)) {
15709                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15710                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15711         } else {
15712                 struct pci_dev *bridge = NULL;
15713
15714                 do {
15715                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15716                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15717                                                 bridge);
15718                         if (bridge && bridge->subordinate &&
15719                             (bridge->subordinate->number <=
15720                              tp->pdev->bus->number) &&
15721                             (bridge->subordinate->busn_res.end >=
15722                              tp->pdev->bus->number)) {
15723                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15724                                 pci_dev_put(bridge);
15725                                 break;
15726                         }
15727                 } while (bridge);
15728         }
15729
15730         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15731             tg3_asic_rev(tp) == ASIC_REV_5714)
15732                 tp->pdev_peer = tg3_find_peer(tp);
15733
15734         /* Determine TSO capabilities */
15735         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15736                 ; /* Do nothing. HW bug. */
15737         else if (tg3_flag(tp, 57765_PLUS))
15738                 tg3_flag_set(tp, HW_TSO_3);
15739         else if (tg3_flag(tp, 5755_PLUS) ||
15740                  tg3_asic_rev(tp) == ASIC_REV_5906)
15741                 tg3_flag_set(tp, HW_TSO_2);
15742         else if (tg3_flag(tp, 5750_PLUS)) {
15743                 tg3_flag_set(tp, HW_TSO_1);
15744                 tg3_flag_set(tp, TSO_BUG);
15745                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15746                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15747                         tg3_flag_clear(tp, TSO_BUG);
15748         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15749                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15750                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15751                 tg3_flag_set(tp, FW_TSO);
15752                 tg3_flag_set(tp, TSO_BUG);
15753                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15754                         tp->fw_needed = FIRMWARE_TG3TSO5;
15755                 else
15756                         tp->fw_needed = FIRMWARE_TG3TSO;
15757         }
15758
15759         /* Selectively allow TSO based on operating conditions */
15760         if (tg3_flag(tp, HW_TSO_1) ||
15761             tg3_flag(tp, HW_TSO_2) ||
15762             tg3_flag(tp, HW_TSO_3) ||
15763             tg3_flag(tp, FW_TSO)) {
15764                 /* For firmware TSO, assume ASF is disabled.
15765                  * We'll disable TSO later if we discover ASF
15766                  * is enabled in tg3_get_eeprom_hw_cfg().
15767                  */
15768                 tg3_flag_set(tp, TSO_CAPABLE);
15769         } else {
15770                 tg3_flag_clear(tp, TSO_CAPABLE);
15771                 tg3_flag_clear(tp, TSO_BUG);
15772                 tp->fw_needed = NULL;
15773         }
15774
15775         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15776                 tp->fw_needed = FIRMWARE_TG3;
15777
15778         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15779                 tp->fw_needed = FIRMWARE_TG357766;
15780
15781         tp->irq_max = 1;
15782
15783         if (tg3_flag(tp, 5750_PLUS)) {
15784                 tg3_flag_set(tp, SUPPORT_MSI);
15785                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15786                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15787                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15788                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15789                      tp->pdev_peer == tp->pdev))
15790                         tg3_flag_clear(tp, SUPPORT_MSI);
15791
15792                 if (tg3_flag(tp, 5755_PLUS) ||
15793                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15794                         tg3_flag_set(tp, 1SHOT_MSI);
15795                 }
15796
15797                 if (tg3_flag(tp, 57765_PLUS)) {
15798                         tg3_flag_set(tp, SUPPORT_MSIX);
15799                         tp->irq_max = TG3_IRQ_MAX_VECS;
15800                 }
15801         }
15802
15803         tp->txq_max = 1;
15804         tp->rxq_max = 1;
15805         if (tp->irq_max > 1) {
15806                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15807                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15808
15809                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15810                     tg3_asic_rev(tp) == ASIC_REV_5720)
15811                         tp->txq_max = tp->irq_max - 1;
15812         }
15813
15814         if (tg3_flag(tp, 5755_PLUS) ||
15815             tg3_asic_rev(tp) == ASIC_REV_5906)
15816                 tg3_flag_set(tp, SHORT_DMA_BUG);
15817
15818         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15819                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15820
15821         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15822             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15823             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15824             tg3_asic_rev(tp) == ASIC_REV_5762)
15825                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15826
15827         if (tg3_flag(tp, 57765_PLUS) &&
15828             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15829                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15830
15831         if (!tg3_flag(tp, 5705_PLUS) ||
15832             tg3_flag(tp, 5780_CLASS) ||
15833             tg3_flag(tp, USE_JUMBO_BDFLAG))
15834                 tg3_flag_set(tp, JUMBO_CAPABLE);
15835
15836         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15837                               &pci_state_reg);
15838
15839         if (pci_is_pcie(tp->pdev)) {
15840                 u16 lnkctl;
15841
15842                 tg3_flag_set(tp, PCI_EXPRESS);
15843
15844                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15845                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15846                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15847                                 tg3_flag_clear(tp, HW_TSO_2);
15848                                 tg3_flag_clear(tp, TSO_CAPABLE);
15849                         }
15850                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15851                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15852                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15853                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15854                                 tg3_flag_set(tp, CLKREQ_BUG);
15855                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15856                         tg3_flag_set(tp, L1PLLPD_EN);
15857                 }
15858         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15859                 /* BCM5785 devices are effectively PCIe devices, and should
15860                  * follow PCIe codepaths, but do not have a PCIe capabilities
15861                  * section.
15862                  */
15863                 tg3_flag_set(tp, PCI_EXPRESS);
15864         } else if (!tg3_flag(tp, 5705_PLUS) ||
15865                    tg3_flag(tp, 5780_CLASS)) {
15866                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15867                 if (!tp->pcix_cap) {
15868                         dev_err(&tp->pdev->dev,
15869                                 "Cannot find PCI-X capability, aborting\n");
15870                         return -EIO;
15871                 }
15872
15873                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15874                         tg3_flag_set(tp, PCIX_MODE);
15875         }
15876
15877         /* If we have an AMD 762 or VIA K8T800 chipset, write
15878          * reordering to the mailbox registers done by the host
15879          * controller can cause major troubles.  We read back from
15880          * every mailbox register write to force the writes to be
15881          * posted to the chip in order.
15882          */
15883         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15884             !tg3_flag(tp, PCI_EXPRESS))
15885                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15886
15887         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15888                              &tp->pci_cacheline_sz);
15889         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15890                              &tp->pci_lat_timer);
15891         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15892             tp->pci_lat_timer < 64) {
15893                 tp->pci_lat_timer = 64;
15894                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15895                                       tp->pci_lat_timer);
15896         }
15897
15898         /* Important! -- It is critical that the PCI-X hw workaround
15899          * situation is decided before the first MMIO register access.
15900          */
15901         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15902                 /* 5700 BX chips need to have their TX producer index
15903                  * mailboxes written twice to workaround a bug.
15904                  */
15905                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15906
15907                 /* If we are in PCI-X mode, enable register write workaround.
15908                  *
15909                  * The workaround is to use indirect register accesses
15910                  * for all chip writes not to mailbox registers.
15911                  */
15912                 if (tg3_flag(tp, PCIX_MODE)) {
15913                         u32 pm_reg;
15914
15915                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15916
15917                         /* The chip can have it's power management PCI config
15918                          * space registers clobbered due to this bug.
15919                          * So explicitly force the chip into D0 here.
15920                          */
15921                         pci_read_config_dword(tp->pdev,
15922                                               tp->pm_cap + PCI_PM_CTRL,
15923                                               &pm_reg);
15924                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15925                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15926                         pci_write_config_dword(tp->pdev,
15927                                                tp->pm_cap + PCI_PM_CTRL,
15928                                                pm_reg);
15929
15930                         /* Also, force SERR#/PERR# in PCI command. */
15931                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15932                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15933                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15934                 }
15935         }
15936
15937         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15938                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15939         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15940                 tg3_flag_set(tp, PCI_32BIT);
15941
15942         /* Chip-specific fixup from Broadcom driver */
15943         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15944             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15945                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15946                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15947         }
15948
15949         /* Default fast path register access methods */
15950         tp->read32 = tg3_read32;
15951         tp->write32 = tg3_write32;
15952         tp->read32_mbox = tg3_read32;
15953         tp->write32_mbox = tg3_write32;
15954         tp->write32_tx_mbox = tg3_write32;
15955         tp->write32_rx_mbox = tg3_write32;
15956
15957         /* Various workaround register access methods */
15958         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15959                 tp->write32 = tg3_write_indirect_reg32;
15960         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15961                  (tg3_flag(tp, PCI_EXPRESS) &&
15962                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15963                 /*
15964                  * Back to back register writes can cause problems on these
15965                  * chips, the workaround is to read back all reg writes
15966                  * except those to mailbox regs.
15967                  *
15968                  * See tg3_write_indirect_reg32().
15969                  */
15970                 tp->write32 = tg3_write_flush_reg32;
15971         }
15972
15973         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15974                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15975                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15976                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15977         }
15978
15979         if (tg3_flag(tp, ICH_WORKAROUND)) {
15980                 tp->read32 = tg3_read_indirect_reg32;
15981                 tp->write32 = tg3_write_indirect_reg32;
15982                 tp->read32_mbox = tg3_read_indirect_mbox;
15983                 tp->write32_mbox = tg3_write_indirect_mbox;
15984                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15985                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15986
15987                 iounmap(tp->regs);
15988                 tp->regs = NULL;
15989
15990                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15991                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15992                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15993         }
15994         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15995                 tp->read32_mbox = tg3_read32_mbox_5906;
15996                 tp->write32_mbox = tg3_write32_mbox_5906;
15997                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15998                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15999         }
16000
16001         if (tp->write32 == tg3_write_indirect_reg32 ||
16002             (tg3_flag(tp, PCIX_MODE) &&
16003              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16004               tg3_asic_rev(tp) == ASIC_REV_5701)))
16005                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16006
16007         /* The memory arbiter has to be enabled in order for SRAM accesses
16008          * to succeed.  Normally on powerup the tg3 chip firmware will make
16009          * sure it is enabled, but other entities such as system netboot
16010          * code might disable it.
16011          */
16012         val = tr32(MEMARB_MODE);
16013         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16014
16015         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16016         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16017             tg3_flag(tp, 5780_CLASS)) {
16018                 if (tg3_flag(tp, PCIX_MODE)) {
16019                         pci_read_config_dword(tp->pdev,
16020                                               tp->pcix_cap + PCI_X_STATUS,
16021                                               &val);
16022                         tp->pci_fn = val & 0x7;
16023                 }
16024         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16025                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16026                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16027                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16028                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16029                         val = tr32(TG3_CPMU_STATUS);
16030
16031                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16032                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16033                 else
16034                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16035                                      TG3_CPMU_STATUS_FSHFT_5719;
16036         }
16037
16038         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16039                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16040                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16041         }
16042
16043         /* Get eeprom hw config before calling tg3_set_power_state().
16044          * In particular, the TG3_FLAG_IS_NIC flag must be
16045          * determined before calling tg3_set_power_state() so that
16046          * we know whether or not to switch out of Vaux power.
16047          * When the flag is set, it means that GPIO1 is used for eeprom
16048          * write protect and also implies that it is a LOM where GPIOs
16049          * are not used to switch power.
16050          */
16051         tg3_get_eeprom_hw_cfg(tp);
16052
16053         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16054                 tg3_flag_clear(tp, TSO_CAPABLE);
16055                 tg3_flag_clear(tp, TSO_BUG);
16056                 tp->fw_needed = NULL;
16057         }
16058
16059         if (tg3_flag(tp, ENABLE_APE)) {
16060                 /* Allow reads and writes to the
16061                  * APE register and memory space.
16062                  */
16063                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16064                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16065                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16066                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16067                                        pci_state_reg);
16068
16069                 tg3_ape_lock_init(tp);
16070         }
16071
16072         /* Set up tp->grc_local_ctrl before calling
16073          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16074          * will bring 5700's external PHY out of reset.
16075          * It is also used as eeprom write protect on LOMs.
16076          */
16077         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16078         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16079             tg3_flag(tp, EEPROM_WRITE_PROT))
16080                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16081                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16082         /* Unused GPIO3 must be driven as output on 5752 because there
16083          * are no pull-up resistors on unused GPIO pins.
16084          */
16085         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16086                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16087
16088         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16089             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090             tg3_flag(tp, 57765_CLASS))
16091                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16092
16093         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16094             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16095                 /* Turn off the debug UART. */
16096                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16097                 if (tg3_flag(tp, IS_NIC))
16098                         /* Keep VMain power. */
16099                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16100                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16101         }
16102
16103         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16104                 tp->grc_local_ctrl |=
16105                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16106
16107         /* Switch out of Vaux if it is a NIC */
16108         tg3_pwrsrc_switch_to_vmain(tp);
16109
16110         /* Derive initial jumbo mode from MTU assigned in
16111          * ether_setup() via the alloc_etherdev() call
16112          */
16113         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16114                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16115
16116         /* Determine WakeOnLan speed to use. */
16117         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16118             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16119             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16120             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16121                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16122         } else {
16123                 tg3_flag_set(tp, WOL_SPEED_100MB);
16124         }
16125
16126         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16127                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16128
16129         /* A few boards don't want Ethernet@WireSpeed phy feature */
16130         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16131             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16132              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16133              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16134             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16135             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16136                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16137
16138         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16139             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16140                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16141         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16142                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16143
16144         if (tg3_flag(tp, 5705_PLUS) &&
16145             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16146             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16147             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16148             !tg3_flag(tp, 57765_PLUS)) {
16149                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16150                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16151                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16152                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16153                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16154                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16155                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16156                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16157                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16158                 } else
16159                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16160         }
16161
16162         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16163             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16164                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16165                 if (tp->phy_otp == 0)
16166                         tp->phy_otp = TG3_OTP_DEFAULT;
16167         }
16168
16169         if (tg3_flag(tp, CPMU_PRESENT))
16170                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16171         else
16172                 tp->mi_mode = MAC_MI_MODE_BASE;
16173
16174         tp->coalesce_mode = 0;
16175         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16176             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16177                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16178
16179         /* Set these bits to enable statistics workaround. */
16180         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16181             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16182             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16183                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16184                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16185         }
16186
16187         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16188             tg3_asic_rev(tp) == ASIC_REV_57780)
16189                 tg3_flag_set(tp, USE_PHYLIB);
16190
16191         err = tg3_mdio_init(tp);
16192         if (err)
16193                 return err;
16194
16195         /* Initialize data/descriptor byte/word swapping. */
16196         val = tr32(GRC_MODE);
16197         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16198             tg3_asic_rev(tp) == ASIC_REV_5762)
16199                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16200                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16201                         GRC_MODE_B2HRX_ENABLE |
16202                         GRC_MODE_HTX2B_ENABLE |
16203                         GRC_MODE_HOST_STACKUP);
16204         else
16205                 val &= GRC_MODE_HOST_STACKUP;
16206
16207         tw32(GRC_MODE, val | tp->grc_mode);
16208
16209         tg3_switch_clocks(tp);
16210
16211         /* Clear this out for sanity. */
16212         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16213
16214         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16215                               &pci_state_reg);
16216         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16217             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16218                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16219                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16220                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16221                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16222                         void __iomem *sram_base;
16223
16224                         /* Write some dummy words into the SRAM status block
16225                          * area, see if it reads back correctly.  If the return
16226                          * value is bad, force enable the PCIX workaround.
16227                          */
16228                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16229
16230                         writel(0x00000000, sram_base);
16231                         writel(0x00000000, sram_base + 4);
16232                         writel(0xffffffff, sram_base + 4);
16233                         if (readl(sram_base) != 0x00000000)
16234                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16235                 }
16236         }
16237
16238         udelay(50);
16239         tg3_nvram_init(tp);
16240
16241         /* If the device has an NVRAM, no need to load patch firmware */
16242         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16243             !tg3_flag(tp, NO_NVRAM))
16244                 tp->fw_needed = NULL;
16245
16246         grc_misc_cfg = tr32(GRC_MISC_CFG);
16247         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16248
16249         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16250             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16251              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16252                 tg3_flag_set(tp, IS_5788);
16253
16254         if (!tg3_flag(tp, IS_5788) &&
16255             tg3_asic_rev(tp) != ASIC_REV_5700)
16256                 tg3_flag_set(tp, TAGGED_STATUS);
16257         if (tg3_flag(tp, TAGGED_STATUS)) {
16258                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16259                                       HOSTCC_MODE_CLRTICK_TXBD);
16260
16261                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16262                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16263                                        tp->misc_host_ctrl);
16264         }
16265
16266         /* Preserve the APE MAC_MODE bits */
16267         if (tg3_flag(tp, ENABLE_APE))
16268                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16269         else
16270                 tp->mac_mode = 0;
16271
16272         if (tg3_10_100_only_device(tp, ent))
16273                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16274
16275         err = tg3_phy_probe(tp);
16276         if (err) {
16277                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16278                 /* ... but do not return immediately ... */
16279                 tg3_mdio_fini(tp);
16280         }
16281
16282         tg3_read_vpd(tp);
16283         tg3_read_fw_ver(tp);
16284
16285         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16286                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16287         } else {
16288                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16289                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16290                 else
16291                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16292         }
16293
16294         /* 5700 {AX,BX} chips have a broken status block link
16295          * change bit implementation, so we must use the
16296          * status register in those cases.
16297          */
16298         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16299                 tg3_flag_set(tp, USE_LINKCHG_REG);
16300         else
16301                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16302
16303         /* The led_ctrl is set during tg3_phy_probe, here we might
16304          * have to force the link status polling mechanism based
16305          * upon subsystem IDs.
16306          */
16307         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16308             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16309             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16310                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16311                 tg3_flag_set(tp, USE_LINKCHG_REG);
16312         }
16313
16314         /* For all SERDES we poll the MAC status register. */
16315         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16316                 tg3_flag_set(tp, POLL_SERDES);
16317         else
16318                 tg3_flag_clear(tp, POLL_SERDES);
16319
16320         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16321         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16322         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16323             tg3_flag(tp, PCIX_MODE)) {
16324                 tp->rx_offset = NET_SKB_PAD;
16325 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16326                 tp->rx_copy_thresh = ~(u16)0;
16327 #endif
16328         }
16329
16330         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16331         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16332         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16333
16334         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16335
16336         /* Increment the rx prod index on the rx std ring by at most
16337          * 8 for these chips to workaround hw errata.
16338          */
16339         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16340             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16341             tg3_asic_rev(tp) == ASIC_REV_5755)
16342                 tp->rx_std_max_post = 8;
16343
16344         if (tg3_flag(tp, ASPM_WORKAROUND))
16345                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16346                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16347
16348         return err;
16349 }
16350
16351 #ifdef CONFIG_SPARC
16352 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16353 {
16354         struct net_device *dev = tp->dev;
16355         struct pci_dev *pdev = tp->pdev;
16356         struct device_node *dp = pci_device_to_OF_node(pdev);
16357         const unsigned char *addr;
16358         int len;
16359
16360         addr = of_get_property(dp, "local-mac-address", &len);
16361         if (addr && len == 6) {
16362                 memcpy(dev->dev_addr, addr, 6);
16363                 return 0;
16364         }
16365         return -ENODEV;
16366 }
16367
16368 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16369 {
16370         struct net_device *dev = tp->dev;
16371
16372         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16373         return 0;
16374 }
16375 #endif
16376
/* Determine the device MAC address, trying sources in decreasing order
 * of reliability: OF property (sparc), the SSB core (embedded 47xx
 * parts), the bootcode's SRAM mailbox, NVRAM, and finally the MAC
 * address registers themselves.  As a last resort on sparc the IDPROM
 * address is used.
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the per-function NVRAM offset of the MAC address.  Dual-MAC
	 * and multi-function chips keep function 1+ addresses elsewhere.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be taken, force-reset the NVRAM
		 * state machine instead of unlocking.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b == "HK" bootcode signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Words are read big-endian, so the MAC
		 * starts at byte 2 of the high word.
		 */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16458
/* Goal codes for tg3_calc_dma_bndry(): stop DMA bursts at cache-line
 * boundaries (single) or at multiples of the cache line (multi).
 */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
16461
/* Merge DMA burst-boundary bits into a DMA_RWCTRL value.
 * @val: base DMA_RWCTRL value to merge the boundary bits into.
 *
 * Several RISC hosts disconnect PCI bursts that cross a cache-line
 * boundary, so on chips where the boundary bits work (5700/5701 and
 * PCIe parts) the burst boundary is clamped based on the host
 * cache-line size.  Returns the merged DMA_RWCTRL value.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* The PCI cache-line-size register counts 32-bit words; a raw
	 * value of zero is treated as the 1024-byte maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Boundary policy is chosen per host architecture */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: only the write-side boundary is controllable */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: each case falls through until the boundary
		 * matches the cache-line size (single-cacheline policy);
		 * the multi-cacheline policy clamps at 256/512/1024 bytes.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16602
/* Run one host<->NIC DMA transfer of @size bytes for the DMA self-test.
 * A test descriptor pointing at the host buffer @buf_dma is written
 * into NIC SRAM through the indirect PCI memory window, queued to the
 * DMA engine's FIFO, and the completion FIFO is then polled.
 *
 * @to_device: true  = read DMA (host memory -> NIC),
 *             false = write DMA (NIC -> host memory).
 *
 * Returns 0 when the completion arrives, -ENODEV after 40 polls at
 * 100us intervals (~4ms) without completion.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and both DMA engines' status */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor: host DMA address, NIC-side buffer at 0x2100, length */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor's SRAM address to start the transfer */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO for our descriptor address */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16683
16684 #define TEST_BUFFER_SIZE        0x2000
16685
16686 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16687         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16688         { },
16689 };
16690
16691 static int tg3_test_dma(struct tg3 *tp)
16692 {
16693         dma_addr_t buf_dma;
16694         u32 *buf, saved_dma_rwctrl;
16695         int ret = 0;
16696
16697         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16698                                  &buf_dma, GFP_KERNEL);
16699         if (!buf) {
16700                 ret = -ENOMEM;
16701                 goto out_nofree;
16702         }
16703
16704         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16705                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16706
16707         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16708
16709         if (tg3_flag(tp, 57765_PLUS))
16710                 goto out;
16711
16712         if (tg3_flag(tp, PCI_EXPRESS)) {
16713                 /* DMA read watermark not used on PCIE */
16714                 tp->dma_rwctrl |= 0x00180000;
16715         } else if (!tg3_flag(tp, PCIX_MODE)) {
16716                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16717                     tg3_asic_rev(tp) == ASIC_REV_5750)
16718                         tp->dma_rwctrl |= 0x003f0000;
16719                 else
16720                         tp->dma_rwctrl |= 0x003f000f;
16721         } else {
16722                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16723                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16724                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16725                         u32 read_water = 0x7;
16726
16727                         /* If the 5704 is behind the EPB bridge, we can
16728                          * do the less restrictive ONE_DMA workaround for
16729                          * better performance.
16730                          */
16731                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16732                             tg3_asic_rev(tp) == ASIC_REV_5704)
16733                                 tp->dma_rwctrl |= 0x8000;
16734                         else if (ccval == 0x6 || ccval == 0x7)
16735                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16736
16737                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16738                                 read_water = 4;
16739                         /* Set bit 23 to enable PCIX hw bug fix */
16740                         tp->dma_rwctrl |=
16741                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16742                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16743                                 (1 << 23);
16744                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16745                         /* 5780 always in PCIX mode */
16746                         tp->dma_rwctrl |= 0x00144000;
16747                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16748                         /* 5714 always in PCIX mode */
16749                         tp->dma_rwctrl |= 0x00148000;
16750                 } else {
16751                         tp->dma_rwctrl |= 0x001b000f;
16752                 }
16753         }
16754         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16755                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16756
16757         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16758             tg3_asic_rev(tp) == ASIC_REV_5704)
16759                 tp->dma_rwctrl &= 0xfffffff0;
16760
16761         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16762             tg3_asic_rev(tp) == ASIC_REV_5701) {
16763                 /* Remove this if it causes problems for some boards. */
16764                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16765
16766                 /* On 5700/5701 chips, we need to set this bit.
16767                  * Otherwise the chip will issue cacheline transactions
16768                  * to streamable DMA memory with not all the byte
16769                  * enables turned on.  This is an error on several
16770                  * RISC PCI controllers, in particular sparc64.
16771                  *
16772                  * On 5703/5704 chips, this bit has been reassigned
16773                  * a different meaning.  In particular, it is used
16774                  * on those chips to enable a PCI-X workaround.
16775                  */
16776                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16777         }
16778
16779         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16780
16781 #if 0
16782         /* Unneeded, already done by tg3_get_invariants.  */
16783         tg3_switch_clocks(tp);
16784 #endif
16785
16786         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16787             tg3_asic_rev(tp) != ASIC_REV_5701)
16788                 goto out;
16789
16790         /* It is best to perform DMA test with maximum write burst size
16791          * to expose the 5700/5701 write DMA bug.
16792          */
16793         saved_dma_rwctrl = tp->dma_rwctrl;
16794         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16795         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16796
16797         while (1) {
16798                 u32 *p = buf, i;
16799
16800                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16801                         p[i] = i;
16802
16803                 /* Send the buffer to the chip. */
16804                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16805                 if (ret) {
16806                         dev_err(&tp->pdev->dev,
16807                                 "%s: Buffer write failed. err = %d\n",
16808                                 __func__, ret);
16809                         break;
16810                 }
16811
16812 #if 0
16813                 /* validate data reached card RAM correctly. */
16814                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16815                         u32 val;
16816                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16817                         if (le32_to_cpu(val) != p[i]) {
16818                                 dev_err(&tp->pdev->dev,
16819                                         "%s: Buffer corrupted on device! "
16820                                         "(%d != %d)\n", __func__, val, i);
16821                                 /* ret = -ENODEV here? */
16822                         }
16823                         p[i] = 0;
16824                 }
16825 #endif
16826                 /* Now read it back. */
16827                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16828                 if (ret) {
16829                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16830                                 "err = %d\n", __func__, ret);
16831                         break;
16832                 }
16833
16834                 /* Verify it. */
16835                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16836                         if (p[i] == i)
16837                                 continue;
16838
16839                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16840                             DMA_RWCTRL_WRITE_BNDRY_16) {
16841                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16842                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16843                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16844                                 break;
16845                         } else {
16846                                 dev_err(&tp->pdev->dev,
16847                                         "%s: Buffer corrupted on read back! "
16848                                         "(%d != %d)\n", __func__, p[i], i);
16849                                 ret = -ENODEV;
16850                                 goto out;
16851                         }
16852                 }
16853
16854                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16855                         /* Success. */
16856                         ret = 0;
16857                         break;
16858                 }
16859         }
16860         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16861             DMA_RWCTRL_WRITE_BNDRY_16) {
16862                 /* DMA test passed without adjusting DMA boundary,
16863                  * now look for chipsets that are known to expose the
16864                  * DMA bug without failing the test.
16865                  */
16866                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16867                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16868                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16869                 } else {
16870                         /* Safe to use the calculated DMA boundary. */
16871                         tp->dma_rwctrl = saved_dma_rwctrl;
16872                 }
16873
16874                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16875         }
16876
16877 out:
16878         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16879 out_nofree:
16880         return ret;
16881 }
16882
16883 static void tg3_init_bufmgr_config(struct tg3 *tp)
16884 {
16885         if (tg3_flag(tp, 57765_PLUS)) {
16886                 tp->bufmgr_config.mbuf_read_dma_low_water =
16887                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16888                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16889                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16890                 tp->bufmgr_config.mbuf_high_water =
16891                         DEFAULT_MB_HIGH_WATER_57765;
16892
16893                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16894                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16895                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16896                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16897                 tp->bufmgr_config.mbuf_high_water_jumbo =
16898                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16899         } else if (tg3_flag(tp, 5705_PLUS)) {
16900                 tp->bufmgr_config.mbuf_read_dma_low_water =
16901                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16902                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16903                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16904                 tp->bufmgr_config.mbuf_high_water =
16905                         DEFAULT_MB_HIGH_WATER_5705;
16906                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16907                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16908                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16909                         tp->bufmgr_config.mbuf_high_water =
16910                                 DEFAULT_MB_HIGH_WATER_5906;
16911                 }
16912
16913                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16914                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16915                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16916                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16917                 tp->bufmgr_config.mbuf_high_water_jumbo =
16918                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16919         } else {
16920                 tp->bufmgr_config.mbuf_read_dma_low_water =
16921                         DEFAULT_MB_RDMA_LOW_WATER;
16922                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16923                         DEFAULT_MB_MACRX_LOW_WATER;
16924                 tp->bufmgr_config.mbuf_high_water =
16925                         DEFAULT_MB_HIGH_WATER;
16926
16927                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16928                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16929                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16930                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16931                 tp->bufmgr_config.mbuf_high_water_jumbo =
16932                         DEFAULT_MB_HIGH_WATER_JUMBO;
16933         }
16934
16935         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16936         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16937 }
16938
16939 static char *tg3_phy_string(struct tg3 *tp)
16940 {
16941         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16942         case TG3_PHY_ID_BCM5400:        return "5400";
16943         case TG3_PHY_ID_BCM5401:        return "5401";
16944         case TG3_PHY_ID_BCM5411:        return "5411";
16945         case TG3_PHY_ID_BCM5701:        return "5701";
16946         case TG3_PHY_ID_BCM5703:        return "5703";
16947         case TG3_PHY_ID_BCM5704:        return "5704";
16948         case TG3_PHY_ID_BCM5705:        return "5705";
16949         case TG3_PHY_ID_BCM5750:        return "5750";
16950         case TG3_PHY_ID_BCM5752:        return "5752";
16951         case TG3_PHY_ID_BCM5714:        return "5714";
16952         case TG3_PHY_ID_BCM5780:        return "5780";
16953         case TG3_PHY_ID_BCM5755:        return "5755";
16954         case TG3_PHY_ID_BCM5787:        return "5787";
16955         case TG3_PHY_ID_BCM5784:        return "5784";
16956         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16957         case TG3_PHY_ID_BCM5906:        return "5906";
16958         case TG3_PHY_ID_BCM5761:        return "5761";
16959         case TG3_PHY_ID_BCM5718C:       return "5718C";
16960         case TG3_PHY_ID_BCM5718S:       return "5718S";
16961         case TG3_PHY_ID_BCM57765:       return "57765";
16962         case TG3_PHY_ID_BCM5719C:       return "5719C";
16963         case TG3_PHY_ID_BCM5720C:       return "5720C";
16964         case TG3_PHY_ID_BCM5762:        return "5762C";
16965         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16966         case 0:                 return "serdes";
16967         default:                return "unknown";
16968         }
16969 }
16970
16971 static char *tg3_bus_string(struct tg3 *tp, char *str)
16972 {
16973         if (tg3_flag(tp, PCI_EXPRESS)) {
16974                 strcpy(str, "PCI Express");
16975                 return str;
16976         } else if (tg3_flag(tp, PCIX_MODE)) {
16977                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16978
16979                 strcpy(str, "PCIX:");
16980
16981                 if ((clock_ctrl == 7) ||
16982                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16983                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16984                         strcat(str, "133MHz");
16985                 else if (clock_ctrl == 0)
16986                         strcat(str, "33MHz");
16987                 else if (clock_ctrl == 2)
16988                         strcat(str, "50MHz");
16989                 else if (clock_ctrl == 4)
16990                         strcat(str, "66MHz");
16991                 else if (clock_ctrl == 6)
16992                         strcat(str, "100MHz");
16993         } else {
16994                 strcpy(str, "PCI:");
16995                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16996                         strcat(str, "66MHz");
16997                 else
16998                         strcat(str, "33MHz");
16999         }
17000         if (tg3_flag(tp, PCI_32BIT))
17001                 strcat(str, ":32-bit");
17002         else
17003                 strcat(str, ":64-bit");
17004         return str;
17005 }
17006
17007 static void tg3_init_coal(struct tg3 *tp)
17008 {
17009         struct ethtool_coalesce *ec = &tp->coal;
17010
17011         memset(ec, 0, sizeof(*ec));
17012         ec->cmd = ETHTOOL_GCOALESCE;
17013         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17014         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17015         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17016         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17017         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17018         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17019         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17020         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17021         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17022
17023         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17024                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17025                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17026                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17027                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17028                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17029         }
17030
17031         if (tg3_flag(tp, 5705_PLUS)) {
17032                 ec->rx_coalesce_usecs_irq = 0;
17033                 ec->tx_coalesce_usecs_irq = 0;
17034                 ec->stats_block_coalesce_usecs = 0;
17035         }
17036 }
17037
17038 static int tg3_init_one(struct pci_dev *pdev,
17039                                   const struct pci_device_id *ent)
17040 {
17041         struct net_device *dev;
17042         struct tg3 *tp;
17043         int i, err, pm_cap;
17044         u32 sndmbx, rcvmbx, intmbx;
17045         char str[40];
17046         u64 dma_mask, persist_dma_mask;
17047         netdev_features_t features = 0;
17048
17049         printk_once(KERN_INFO "%s\n", version);
17050
17051         err = pci_enable_device(pdev);
17052         if (err) {
17053                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17054                 return err;
17055         }
17056
17057         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17058         if (err) {
17059                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17060                 goto err_out_disable_pdev;
17061         }
17062
17063         pci_set_master(pdev);
17064
17065         /* Find power-management capability. */
17066         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17067         if (pm_cap == 0) {
17068                 dev_err(&pdev->dev,
17069                         "Cannot find Power Management capability, aborting\n");
17070                 err = -EIO;
17071                 goto err_out_free_res;
17072         }
17073
17074         err = pci_set_power_state(pdev, PCI_D0);
17075         if (err) {
17076                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17077                 goto err_out_free_res;
17078         }
17079
17080         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17081         if (!dev) {
17082                 err = -ENOMEM;
17083                 goto err_out_power_down;
17084         }
17085
17086         SET_NETDEV_DEV(dev, &pdev->dev);
17087
17088         tp = netdev_priv(dev);
17089         tp->pdev = pdev;
17090         tp->dev = dev;
17091         tp->pm_cap = pm_cap;
17092         tp->rx_mode = TG3_DEF_RX_MODE;
17093         tp->tx_mode = TG3_DEF_TX_MODE;
17094         tp->irq_sync = 1;
17095
17096         if (tg3_debug > 0)
17097                 tp->msg_enable = tg3_debug;
17098         else
17099                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17100
17101         if (pdev_is_ssb_gige_core(pdev)) {
17102                 tg3_flag_set(tp, IS_SSB_CORE);
17103                 if (ssb_gige_must_flush_posted_writes(pdev))
17104                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17105                 if (ssb_gige_one_dma_at_once(pdev))
17106                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17107                 if (ssb_gige_have_roboswitch(pdev))
17108                         tg3_flag_set(tp, ROBOSWITCH);
17109                 if (ssb_gige_is_rgmii(pdev))
17110                         tg3_flag_set(tp, RGMII_MODE);
17111         }
17112
17113         /* The word/byte swap controls here control register access byte
17114          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17115          * setting below.
17116          */
17117         tp->misc_host_ctrl =
17118                 MISC_HOST_CTRL_MASK_PCI_INT |
17119                 MISC_HOST_CTRL_WORD_SWAP |
17120                 MISC_HOST_CTRL_INDIR_ACCESS |
17121                 MISC_HOST_CTRL_PCISTATE_RW;
17122
17123         /* The NONFRM (non-frame) byte/word swap controls take effect
17124          * on descriptor entries, anything which isn't packet data.
17125          *
17126          * The StrongARM chips on the board (one for tx, one for rx)
17127          * are running in big-endian mode.
17128          */
17129         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17130                         GRC_MODE_WSWAP_NONFRM_DATA);
17131 #ifdef __BIG_ENDIAN
17132         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17133 #endif
17134         spin_lock_init(&tp->lock);
17135         spin_lock_init(&tp->indirect_lock);
17136         INIT_WORK(&tp->reset_task, tg3_reset_task);
17137
17138         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17139         if (!tp->regs) {
17140                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17141                 err = -ENOMEM;
17142                 goto err_out_free_dev;
17143         }
17144
17145         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17146             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17147             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17148             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17149             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17150             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17151             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17152             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17153             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17154             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17155             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17156             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17157                 tg3_flag_set(tp, ENABLE_APE);
17158                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17159                 if (!tp->aperegs) {
17160                         dev_err(&pdev->dev,
17161                                 "Cannot map APE registers, aborting\n");
17162                         err = -ENOMEM;
17163                         goto err_out_iounmap;
17164                 }
17165         }
17166
17167         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17168         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17169
17170         dev->ethtool_ops = &tg3_ethtool_ops;
17171         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17172         dev->netdev_ops = &tg3_netdev_ops;
17173         dev->irq = pdev->irq;
17174
17175         err = tg3_get_invariants(tp, ent);
17176         if (err) {
17177                 dev_err(&pdev->dev,
17178                         "Problem fetching invariants of chip, aborting\n");
17179                 goto err_out_apeunmap;
17180         }
17181
17182         /* The EPB bridge inside 5714, 5715, and 5780 and any
17183          * device behind the EPB cannot support DMA addresses > 40-bit.
17184          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17185          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17186          * do DMA address check in tg3_start_xmit().
17187          */
17188         if (tg3_flag(tp, IS_5788))
17189                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17190         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17191                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17192 #ifdef CONFIG_HIGHMEM
17193                 dma_mask = DMA_BIT_MASK(64);
17194 #endif
17195         } else
17196                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17197
17198         /* Configure DMA attributes. */
17199         if (dma_mask > DMA_BIT_MASK(32)) {
17200                 err = pci_set_dma_mask(pdev, dma_mask);
17201                 if (!err) {
17202                         features |= NETIF_F_HIGHDMA;
17203                         err = pci_set_consistent_dma_mask(pdev,
17204                                                           persist_dma_mask);
17205                         if (err < 0) {
17206                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17207                                         "DMA for consistent allocations\n");
17208                                 goto err_out_apeunmap;
17209                         }
17210                 }
17211         }
17212         if (err || dma_mask == DMA_BIT_MASK(32)) {
17213                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17214                 if (err) {
17215                         dev_err(&pdev->dev,
17216                                 "No usable DMA configuration, aborting\n");
17217                         goto err_out_apeunmap;
17218                 }
17219         }
17220
17221         tg3_init_bufmgr_config(tp);
17222
17223         features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17224
17225         /* 5700 B0 chips do not support checksumming correctly due
17226          * to hardware bugs.
17227          */
17228         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17229                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17230
17231                 if (tg3_flag(tp, 5755_PLUS))
17232                         features |= NETIF_F_IPV6_CSUM;
17233         }
17234
17235         /* TSO is on by default on chips that support hardware TSO.
17236          * Firmware TSO on older chips gives lower performance, so it
17237          * is off by default, but can be enabled using ethtool.
17238          */
17239         if ((tg3_flag(tp, HW_TSO_1) ||
17240              tg3_flag(tp, HW_TSO_2) ||
17241              tg3_flag(tp, HW_TSO_3)) &&
17242             (features & NETIF_F_IP_CSUM))
17243                 features |= NETIF_F_TSO;
17244         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17245                 if (features & NETIF_F_IPV6_CSUM)
17246                         features |= NETIF_F_TSO6;
17247                 if (tg3_flag(tp, HW_TSO_3) ||
17248                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17249                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17250                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17251                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17252                     tg3_asic_rev(tp) == ASIC_REV_57780)
17253                         features |= NETIF_F_TSO_ECN;
17254         }
17255
17256         dev->features |= features;
17257         dev->vlan_features |= features;
17258
17259         /*
17260          * Add loopback capability only for a subset of devices that support
17261          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17262          * loopback for the remaining devices.
17263          */
17264         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17265             !tg3_flag(tp, CPMU_PRESENT))
17266                 /* Add the loopback capability */
17267                 features |= NETIF_F_LOOPBACK;
17268
17269         dev->hw_features |= features;
17270
17271         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17272             !tg3_flag(tp, TSO_CAPABLE) &&
17273             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17274                 tg3_flag_set(tp, MAX_RXPEND_64);
17275                 tp->rx_pending = 63;
17276         }
17277
17278         err = tg3_get_device_address(tp);
17279         if (err) {
17280                 dev_err(&pdev->dev,
17281                         "Could not obtain valid ethernet address, aborting\n");
17282                 goto err_out_apeunmap;
17283         }
17284
17285         /*
17286          * Reset chip in case UNDI or EFI driver did not shutdown
17287          * DMA self test will enable WDMAC and we'll see (spurious)
17288          * pending DMA on the PCI bus at that point.
17289          */
17290         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17291             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17292                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17293                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17294         }
17295
17296         err = tg3_test_dma(tp);
17297         if (err) {
17298                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17299                 goto err_out_apeunmap;
17300         }
17301
17302         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17303         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17304         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17305         for (i = 0; i < tp->irq_max; i++) {
17306                 struct tg3_napi *tnapi = &tp->napi[i];
17307
17308                 tnapi->tp = tp;
17309                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17310
17311                 tnapi->int_mbox = intmbx;
17312                 if (i <= 4)
17313                         intmbx += 0x8;
17314                 else
17315                         intmbx += 0x4;
17316
17317                 tnapi->consmbox = rcvmbx;
17318                 tnapi->prodmbox = sndmbx;
17319
17320                 if (i)
17321                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17322                 else
17323                         tnapi->coal_now = HOSTCC_MODE_NOW;
17324
17325                 if (!tg3_flag(tp, SUPPORT_MSIX))
17326                         break;
17327
17328                 /*
17329                  * If we support MSIX, we'll be using RSS.  If we're using
17330                  * RSS, the first vector only handles link interrupts and the
17331                  * remaining vectors handle rx and tx interrupts.  Reuse the
17332                  * mailbox values for the next iteration.  The values we setup
17333                  * above are still useful for the single vectored mode.
17334                  */
17335                 if (!i)
17336                         continue;
17337
17338                 rcvmbx += 0x8;
17339
17340                 if (sndmbx & 0x4)
17341                         sndmbx -= 0x4;
17342                 else
17343                         sndmbx += 0xc;
17344         }
17345
17346         tg3_init_coal(tp);
17347
17348         pci_set_drvdata(pdev, dev);
17349
17350         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17351             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17352             tg3_asic_rev(tp) == ASIC_REV_5762)
17353                 tg3_flag_set(tp, PTP_CAPABLE);
17354
17355         if (tg3_flag(tp, 5717_PLUS)) {
17356                 /* Resume a low-power mode */
17357                 tg3_frob_aux_power(tp, false);
17358         }
17359
17360         tg3_timer_init(tp);
17361
17362         tg3_carrier_off(tp);
17363
17364         err = register_netdev(dev);
17365         if (err) {
17366                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17367                 goto err_out_apeunmap;
17368         }
17369
17370         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17371                     tp->board_part_number,
17372                     tg3_chip_rev_id(tp),
17373                     tg3_bus_string(tp, str),
17374                     dev->dev_addr);
17375
17376         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17377                 struct phy_device *phydev;
17378                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17379                 netdev_info(dev,
17380                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17381                             phydev->drv->name, dev_name(&phydev->dev));
17382         } else {
17383                 char *ethtype;
17384
17385                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17386                         ethtype = "10/100Base-TX";
17387                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17388                         ethtype = "1000Base-SX";
17389                 else
17390                         ethtype = "10/100/1000Base-T";
17391
17392                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17393                             "(WireSpeed[%d], EEE[%d])\n",
17394                             tg3_phy_string(tp), ethtype,
17395                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17396                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17397         }
17398
17399         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17400                     (dev->features & NETIF_F_RXCSUM) != 0,
17401                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17402                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17403                     tg3_flag(tp, ENABLE_ASF) != 0,
17404                     tg3_flag(tp, TSO_CAPABLE) != 0);
17405         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17406                     tp->dma_rwctrl,
17407                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17408                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17409
17410         pci_save_state(pdev);
17411
17412         return 0;
17413
17414 err_out_apeunmap:
17415         if (tp->aperegs) {
17416                 iounmap(tp->aperegs);
17417                 tp->aperegs = NULL;
17418         }
17419
17420 err_out_iounmap:
17421         if (tp->regs) {
17422                 iounmap(tp->regs);
17423                 tp->regs = NULL;
17424         }
17425
17426 err_out_free_dev:
17427         free_netdev(dev);
17428
17429 err_out_power_down:
17430         pci_set_power_state(pdev, PCI_D3hot);
17431
17432 err_out_free_res:
17433         pci_release_regions(pdev);
17434
17435 err_out_disable_pdev:
17436         pci_disable_device(pdev);
17437         pci_set_drvdata(pdev, NULL);
17438         return err;
17439 }
17440
/* tg3_remove_one - PCI remove callback.
 * Tears down everything tg3_init_one() set up, in reverse order:
 * firmware, pending reset work, PHY/MDIO, the net_device registration,
 * register mappings, and finally the PCI resources.  A NULL drvdata
 * pointer (probe never completed) is tolerated.
 */
17441 static void tg3_remove_one(struct pci_dev *pdev)
17442 {
17443         struct net_device *dev = pci_get_drvdata(pdev);
17444
17445         if (dev) {
17446                 struct tg3 *tp = netdev_priv(dev);
17447
                      /* release_firmware() accepts NULL, so this is safe
                       * even if no firmware was ever loaded.
                       */
17448                 release_firmware(tp->fw);
17449
                      /* Make sure no reset_task work runs after teardown. */
17450                 tg3_reset_task_cancel(tp);
17451
17452                 if (tg3_flag(tp, USE_PHYLIB)) {
17453                         tg3_phy_fini(tp);
17454                         tg3_mdio_fini(tp);
17455                 }
17456
17457                 unregister_netdev(dev);
17458                 if (tp->aperegs) {
17459                         iounmap(tp->aperegs);
17460                         tp->aperegs = NULL;
17461                 }
17462                 if (tp->regs) {
17463                         iounmap(tp->regs);
17464                         tp->regs = NULL;
17465                 }
17466                 free_netdev(dev);
17467                 pci_release_regions(pdev);
17468                 pci_disable_device(pdev);
17469                 pci_set_drvdata(pdev, NULL);
17470         }
17471 }
17472
17473 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend - system-sleep (PM) suspend callback.
 * Quiesces the interface (reset task, PHY, NAPI, timer, interrupts),
 * halts the chip and prepares it for low power.  If the power-down
 * preparation fails, the hardware is restarted so the interface remains
 * usable, but the original error is still returned to the PM core.
 */
17474 static int tg3_suspend(struct device *device)
17475 {
17476         struct pci_dev *pdev = to_pci_dev(device);
17477         struct net_device *dev = pci_get_drvdata(pdev);
17478         struct tg3 *tp = netdev_priv(dev);
17479         int err;
17480
              /* Nothing to quiesce if the interface is down. */
17481         if (!netif_running(dev))
17482                 return 0;
17483
17484         tg3_reset_task_cancel(tp);
17485         tg3_phy_stop(tp);
17486         tg3_netif_stop(tp);
17487
17488         tg3_timer_stop(tp);
17489
17490         tg3_full_lock(tp, 1);
17491         tg3_disable_ints(tp);
17492         tg3_full_unlock(tp);
17493
17494         netif_device_detach(dev);
17495
17496         tg3_full_lock(tp, 0);
17497         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17498         tg3_flag_clear(tp, INIT_COMPLETE);
17499         tg3_full_unlock(tp);
17500
17501         err = tg3_power_down_prepare(tp);
17502         if (err) {
                      /* Recovery path: bring the hardware back up so the
                       * device stays functional despite the failed suspend.
                       */
17503                 int err2;
17504
17505                 tg3_full_lock(tp, 0);
17506
17507                 tg3_flag_set(tp, INIT_COMPLETE);
17508                 err2 = tg3_restart_hw(tp, true);
17509                 if (err2)
17510                         goto out;
17511
17512                 tg3_timer_start(tp);
17513
17514                 netif_device_attach(dev);
17515                 tg3_netif_start(tp);
17516
17517 out:
17518                 tg3_full_unlock(tp);
17519
17520                 if (!err2)
17521                         tg3_phy_start(tp);
17522         }
17523
              /* Return the original power-down error, if any. */
17524         return err;
17525 }
17526
/* tg3_resume - system-sleep (PM) resume callback.
 * Re-attaches the device and restarts the hardware, timer, NAPI and PHY
 * in the reverse order of tg3_suspend().  Returns 0 or the error from
 * tg3_restart_hw().
 */
17527 static int tg3_resume(struct device *device)
17528 {
17529         struct pci_dev *pdev = to_pci_dev(device);
17530         struct net_device *dev = pci_get_drvdata(pdev);
17531         struct tg3 *tp = netdev_priv(dev);
17532         int err;
17533
17534         if (!netif_running(dev))
17535                 return 0;
17536
17537         netif_device_attach(dev);
17538
17539         tg3_full_lock(tp, 0);
17540
17541         tg3_flag_set(tp, INIT_COMPLETE);
              /* Skip the PHY reset if the link was kept up over power-down. */
17542         err = tg3_restart_hw(tp,
17543                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17544         if (err)
17545                 goto out;
17546
17547         tg3_timer_start(tp);
17548
17549         tg3_netif_start(tp);
17550
17551 out:
17552         tg3_full_unlock(tp);
17553
              /* tg3_phy_start() must run without the tp locks held. */
17554         if (!err)
17555                 tg3_phy_start(tp);
17556
17557         return err;
17558 }
17559 #endif /* CONFIG_PM_SLEEP */
17560
/* Wire up the sleep callbacks; they compile out when CONFIG_PM_SLEEP is unset. */
17561 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17562
17563 /**
17564  * tg3_io_error_detected - called when PCI error is detected
17565  * @pdev: Pointer to PCI device
17566  * @state: The current pci connection state
17567  *
17568  * This function is called after a PCI bus error affecting
17569  * this device has been detected.
17570  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        /* Quiesce PHY, NAPI and the timer before touching chip state. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        /* A permanently failed channel cannot be recovered; otherwise
         * disable the device and request a slot reset.
         */
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}
17611
17612 /**
17613  * tg3_io_slot_reset - called after the pci bus has been reset.
17614  * @pdev: Pointer to PCI device
17615  *
17616  * Restart the card from scratch, as if from a cold-boot.
17617  * At this point, the card has experienced a hard reset,
17618  * followed by fixups by BIOS, and has its config space
17619  * set up identically to what it was at cold boot.
17620  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        /* Re-apply the config space saved by tg3_init_one() and re-save
         * it so a subsequent slot reset can restore it again.
         */
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}
17655
17656 /**
17657  * tg3_io_resume - called when traffic can start flowing again.
17658  * @pdev: Pointer to PCI device
17659  *
17660  * This callback is called when the error recovery driver tells
17661  * us that it's OK to resume normal operation.
17662  */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        /* Reinitialize the hardware and restart traffic; on failure the
         * device is simply left detached.
         */
        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* PHY restart must happen outside the tp locks. */
        tg3_phy_start(tp);

done:
        rtnl_unlock();
}
17696
/* PCI error-recovery (EEH/AER) hooks; see the three tg3_io_* callbacks above. */
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};
17702
/* PCI driver glue: probe/remove, power management and error recovery. */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
};
17711
/* Module init: register the PCI driver; probing happens per matching device. */
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}
17716
/* Module exit: unregister the driver, which removes all bound devices. */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);