ec244c9a6dd6c697e7ea0329ffa0d5cad451d241
[cascardo/linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85         clear_bit(flag, bits);
86 }
87
/* Convenience wrappers: paste TG3_FLAG_ onto @flag so callers write
 * tg3_flag(tp, ENABLE_APE), and route through the type-checked helpers
 * above so @flag must be an enum TG3_FLAGS member.
 */
#define tg3_flag(tp, flag)                              \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     132
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "May 21, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Statistic names reported via ethtool -S.  The entry order is
 * significant: the Nth name labels the Nth counter copied out by the
 * stats-gathering code (presumably mirroring the hardware statistics
 * block layout -- verify against the tg3 stats structures before
 * reordering or inserting entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Self-test names reported via ethtool.  Designated indices are the
 * TG3_*_TEST constants defined above; keep the two in sync.  The
 * strings are padded so the (online)/(offline) tags line up in output.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465         writel(val, tp->regs + off);
466 }
467
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470         return readl(tp->regs + off);
471 }
472
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475         writel(val, tp->aperegs + off);
476 }
477
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480         return readl(tp->aperegs + off);
481 }
482
/* Write @val to register @off through the PCI config-space indirect
 * access window (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA).  The
 * indirect_lock keeps the base-address/data write pair atomic with
 * respect to other indirect accessors.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495         writel(val, tp->regs + off);
496         readl(tp->regs + off);
497 }
498
/* Read register @off through the PCI config-space indirect access
 * window.  indirect_lock serializes the base-address write with the
 * subsequent data read.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
510
/* Write @val to mailbox @off while the device is in indirect (PCI
 * config space) access mode.  Two hot-path mailboxes have dedicated
 * config-space shadow registers and bypass the generic window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return-ring consumer index: dedicated shadow register. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX ring producer index: dedicated shadow register. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* Generic path: mailbox registers sit at 0x5600 + off in the
	 * register map (presumably the GRC mailbox base -- TODO confirm
	 * against GRCMBOX_BASE in tg3.h).
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
/* Read mailbox @off while the device is in indirect access mode.
 * Mailboxes are addressed at 0x5600 + off in the register map (same
 * offset convention as tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
/* Write @val to register @off and make sure the write has reached the
 * device before returning.  @usec_wait is the settle delay in usec for
 * registers that are unsafe to read back immediately (see the comment
 * block above: GRC_LOCAL_CTRL GPIO power switching, TG3PCI_CLOCK_CTRL
 * frequency changes).
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 already guarantees
		 * completion, so no read-back is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay, then read back to flush. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
576
/* Write @val to mailbox @off, reading it back when required: always
 * under FLUSH_POSTED_WRITES, and otherwise only when neither the
 * write-reordering nor the ICH workaround quirk is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Post a TX mailbox write.  On chips with the TXD_MBOX_HWBUG quirk the
 * value is written twice (hardware bug workaround); a read-back flush
 * is issued when posted writes may be reordered or must be flushed.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599         return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604         writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606
/* Register/mailbox accessor shorthands.  These dispatch through the
 * per-chip access methods installed in struct tg3, so the same call
 * sites work in direct, indirect, and 5906 modes; the "_f" variants
 * flush the posted write (tw32_wait_f also delays @us microseconds).
 * All of them require a variable named "tp" in scope.
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC on-chip SRAM at offset @off via the memory
 * window.  indirect_lock serializes users of the shared window
 * base-address register.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: silently skip the statistics block region (presumably
	 * unsafe to write on this chip -- TODO confirm).
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Drive the window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the window through memory-mapped registers. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read NIC on-chip SRAM at offset @off into *@val via the memory
 * window.  Mirror of tg3_write_mem(); indirect_lock serializes the
 * shared window base-address register.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the statistics block region is not accessed; report 0
	 * instead (see matching check in tg3_write_mem()).
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Drive the window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Drive the window through memory-mapped registers. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
/* Release every APE lock this driver (or a previous instance of it)
 * might still hold, so we start from a clean slate.  Writing the
 * driver's grant bit to the grant register releases the lock.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	/* 5761 uses a single grant register block; later chips have
	 * per-lock grant registers.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			/* PHY locks always use the generic driver bit. */
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			/* Other locks are tracked per PCI function. */
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
699
/* Acquire APE hardware semaphore @locknum, shared between the host
 * driver and the APE management firmware.  Returns 0 on success (or
 * when APE is absent / the lock does not apply), -EBUSY if the grant
 * is not observed within ~1 ms, -EINVAL for an unknown lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO lock uses the per-function bit too */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses a single request/grant register block; later chips
	 * have per-lock registers.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
761
/* Release an APE hardware semaphore previously taken with
 * tg3_ape_lock().  Silently does nothing for invalid lock numbers or
 * when APE support is disabled.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must match the bit selection made in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the legacy single grant register. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit to the grant register drops the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
797
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 {
800         u32 apedata;
801
802         while (timeout_us) {
803                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
804                         return -EBUSY;
805
806                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
808                         break;
809
810                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
811
812                 udelay(10);
813                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
814         }
815
816         return timeout_us ? 0 : -EBUSY;
817 }
818
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 {
821         u32 i, apedata;
822
823         for (i = 0; i < timeout_us / 10; i++) {
824                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825
826                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
827                         break;
828
829                 udelay(10);
830         }
831
832         return i == timeout_us / 10;
833 }
834
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data (NCSI firmware only).  The APE copies chunks into a shared
 * message buffer on request; we post one request per chunk and then
 * copy the payload out.  Returns 0 on success or a negative errno.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Message buffer layout: [offset][length][payload...]; bufoff
	 * points at the two-word header, msgoff at the payload.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* We now hold the MEM lock: post the scratchpad-read
		 * request and its offset/length header.
		 */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the lock, then kick the APE to do the copy. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		/* Give the APE up to 30 ms to complete this chunk. */
		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the shared message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
898
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 {
901         int err;
902         u32 apedata;
903
904         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905         if (apedata != APE_SEG_SIG_MAGIC)
906                 return -EAGAIN;
907
908         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909         if (!(apedata & APE_FW_STATUS_READY))
910                 return -EAGAIN;
911
912         /* Wait for up to 1 millisecond for APE to service previous event. */
913         err = tg3_ape_event_lock(tp, 1000);
914         if (err)
915                 return err;
916
917         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918                         event | APE_EVENT_STATUS_EVENT_PENDING);
919
920         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
922
923         return 0;
924 }
925
/* Inform the APE firmware of a driver state transition (@kind is one of
 * the RESET_KIND_* values).  Unknown kinds are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, bump the
		 * init counter and advertise the driver version, then
		 * mark the driver as started.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* If WoL is armed, tell the APE to keep the link up at
		 * auto-negotiated speed while the host is down.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Re-enable chip interrupts after tg3_disable_ints(): clear irq_sync,
 * unmask the PCI interrupt, ack each vector's mailbox up to last_tag,
 * and make sure any already-pending status update raises an interrupt.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order irq_sync clear before unmasking interrupts */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* With 1-shot MSI the mailbox is written twice. */
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack everything up to last_tag and re-arm the vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Switch the chip core clock back to its normal source, stepping
 * through the ALTCLK intermediate setting where the hardware requires
 * it.  No-op on CPMU-equipped and 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Preserve only the CLKRUN bits and the low nibble+1. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHz+ALTCLK first, then ALTCLK
		 * alone, before settling on the final value below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
1099 #define PHY_BUSY_LOOPS  5000
1100
/* Read PHY register @reg at MII address @phy_addr through the MAC's MI
 * (MDIO) interface, storing the result in *val.  Returns 0 on success
 * or -EBUSY if the transaction did not complete within PHY_BUSY_LOOPS
 * polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction; park it
	 * for the duration and restore it on the way out.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Serialize PHY access against the APE firmware. */
	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears, then re-read for the data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read @reg from the default PHY (tp->phy_addr) into *val. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write @val to PHY register @reg at MII address @phy_addr through the
 * MAC's MI (MDIO) interface.  Returns 0 on success or -EBUSY if the
 * transaction did not complete within PHY_BUSY_LOOPS polls.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs silently ignore these registers; pretend success. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction; park it
	 * for the duration and restore it on the way out.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Serialize PHY access against the APE firmware. */
	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write @val to @reg on the default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317
1318         if (enable)
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331         u32 phy_control;
1332         int limit, err;
1333
1334         /* OK, reset it, and poll the BMCR_RESET bit until it
1335          * clears or we time out.
1336          */
1337         phy_control = BMCR_RESET;
1338         err = tg3_writephy(tp, MII_BMCR, phy_control);
1339         if (err != 0)
1340                 return -EBUSY;
1341
1342         limit = 5000;
1343         while (limit--) {
1344                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345                 if (err != 0)
1346                         return -EBUSY;
1347
1348                 if ((phy_control & BMCR_RESET) == 0) {
1349                         udelay(40);
1350                         break;
1351                 }
1352                 udelay(10);
1353         }
1354         if (limit < 0)
1355                 return -EBUSY;
1356
1357         return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* mdio-bus reset hook: nothing to do for this hardware. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Configure the 5785 MAC's PHY interface registers (MAC_PHYCFG1/2 and
 * the external RGMII mode register) according to which PHY model is
 * attached and whether the link runs over RGMII.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	/* Pick LED modes based on the attached PHY model. */
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII links need only the LED modes plus clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Clear all RGMII mode bits, then re-enable the RX/TX groups
	 * according to the external in-band status flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Disable MI auto-polling (manual MDIO access takes over) and, if the
 * mdio bus is already up on a 5785, reapply its PHY interface config.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY's MII address, start manual MDIO access, and (when
 * phylib is in use) allocate, populate and register the mdio bus, then
 * apply per-PHY-model quirks.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices map PHYs by PCI function; serdes PHYs
		 * sit 7 addresses above the copper ones.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id is derived from the PCI bus/devfn to stay unique. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY-model interface and power-down quirks. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
/* tp->lock is held. */
/* Wait until the RX CPU firmware has acknowledged the previous driver
 * event (cleared GRC_RX_CPU_DRIVER_EVENT), but never longer than
 * TG3_FW_EVENT_TIMEOUT_USEC past the time the event was raised.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert the microsecond budget into 8-usec poll iterations. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Device fell off the bus; no point polling further. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1644
/* tp->lock is held. */
/* Pack the current MII register state into four 32-bit words for the
 * UMP link report: each word holds one register pair, high half first.
 * Registers that fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	/* Word 0: BMCR (high) | BMSR (low). */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* Word 1: ADVERTISE (high) | LPA (low). */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* Word 2: CTRL1000 (high) | STAT1000 (low); zero on serdes. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	/* Word 3: PHYADDR in the high half, low half zero. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1679
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3 *tp)
1682 {
1683         u32 data[4];
1684
1685         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1686                 return;
1687
1688         tg3_phy_gather_ump_data(tp, data);
1689
1690         tg3_wait_for_event_ack(tp);
1691
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698
1699         tg3_generate_fw_event(tp);
1700 }
1701
1702 /* tp->lock is held. */
1703 static void tg3_stop_fw(struct tg3 *tp)
1704 {
1705         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1706                 /* Wait for RX cpu to ACK the previous event. */
1707                 tg3_wait_for_event_ack(tp);
1708
1709                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710
1711                 tg3_generate_fw_event(tp);
1712
1713                 /* Wait for RX cpu to ACK this event. */
1714                 tg3_wait_for_event_ack(tp);
1715         }
1716 }
1717
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 {
1721         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1722                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723
1724         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725                 switch (kind) {
1726                 case RESET_KIND_INIT:
1727                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728                                       DRV_STATE_START);
1729                         break;
1730
1731                 case RESET_KIND_SHUTDOWN:
1732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733                                       DRV_STATE_UNLOAD);
1734                         break;
1735
1736                 case RESET_KIND_SUSPEND:
1737                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738                                       DRV_STATE_SUSPEND);
1739                         break;
1740
1741                 default:
1742                         break;
1743                 }
1744         }
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751                 switch (kind) {
1752                 case RESET_KIND_INIT:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_START_DONE);
1755                         break;
1756
1757                 case RESET_KIND_SHUTDOWN:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_UNLOAD_DONE);
1760                         break;
1761
1762                 default:
1763                         break;
1764                 }
1765         }
1766 }
1767
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 {
1771         if (tg3_flag(tp, ENABLE_ASF)) {
1772                 switch (kind) {
1773                 case RESET_KIND_INIT:
1774                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775                                       DRV_STATE_START);
1776                         break;
1777
1778                 case RESET_KIND_SHUTDOWN:
1779                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780                                       DRV_STATE_UNLOAD);
1781                         break;
1782
1783                 case RESET_KIND_SUSPEND:
1784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785                                       DRV_STATE_SUSPEND);
1786                         break;
1787
1788                 default:
1789                         break;
1790                 }
1791         }
1792 }
1793
/* Wait for the chip's boot firmware to report initialization done.
 *
 * Returns 0 on success or when no firmware is expected to be present;
 * -ENODEV on the 5906 when the VCPU never signals init-done or the
 * PCI channel goes offline.  For other chips a timeout is NOT an
 * error (some boards legitimately ship without firmware) and is only
 * logged once via the NO_FWARE_REPORTED flag.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* A previous poll already determined no firmware is running. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  Firmware
	 * signals completion by writing the complement of the magic
	 * value into the firmware mailbox.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			/* Device vanished; report missing firmware once. */
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1857
1858 static void tg3_link_report(struct tg3 *tp)
1859 {
1860         if (!netif_carrier_ok(tp->dev)) {
1861                 netif_info(tp, link, tp->dev, "Link is down\n");
1862                 tg3_ump_link_report(tp);
1863         } else if (netif_msg_link(tp)) {
1864                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1865                             (tp->link_config.active_speed == SPEED_1000 ?
1866                              1000 :
1867                              (tp->link_config.active_speed == SPEED_100 ?
1868                               100 : 10)),
1869                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1870                              "full" : "half"));
1871
1872                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1873                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874                             "on" : "off",
1875                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1876                             "on" : "off");
1877
1878                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1879                         netdev_info(tp->dev, "EEE is %s\n",
1880                                     tp->setlpicnt ? "enabled" : "disabled");
1881
1882                 tg3_ump_link_report(tp);
1883         }
1884
1885         tp->link_up = netif_carrier_ok(tp->dev);
1886 }
1887
1888 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1889 {
1890         u32 flowctrl = 0;
1891
1892         if (adv & ADVERTISE_PAUSE_CAP) {
1893                 flowctrl |= FLOW_CTRL_RX;
1894                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1895                         flowctrl |= FLOW_CTRL_TX;
1896         } else if (adv & ADVERTISE_PAUSE_ASYM)
1897                 flowctrl |= FLOW_CTRL_TX;
1898
1899         return flowctrl;
1900 }
1901
1902 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1903 {
1904         u16 miireg;
1905
1906         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1907                 miireg = ADVERTISE_1000XPAUSE;
1908         else if (flow_ctrl & FLOW_CTRL_TX)
1909                 miireg = ADVERTISE_1000XPSE_ASYM;
1910         else if (flow_ctrl & FLOW_CTRL_RX)
1911                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1912         else
1913                 miireg = 0;
1914
1915         return miireg;
1916 }
1917
1918 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1919 {
1920         u32 flowctrl = 0;
1921
1922         if (adv & ADVERTISE_1000XPAUSE) {
1923                 flowctrl |= FLOW_CTRL_RX;
1924                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1925                         flowctrl |= FLOW_CTRL_TX;
1926         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1927                 flowctrl |= FLOW_CTRL_TX;
1928
1929         return flowctrl;
1930 }
1931
1932 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1933 {
1934         u8 cap = 0;
1935
1936         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1937                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1938         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1939                 if (lcladv & ADVERTISE_1000XPAUSE)
1940                         cap = FLOW_CTRL_RX;
1941                 if (rmtadv & ADVERTISE_1000XPAUSE)
1942                         cap = FLOW_CTRL_TX;
1943         }
1944
1945         return cap;
1946 }
1947
1948 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1949 {
1950         u8 autoneg;
1951         u8 flowctrl = 0;
1952         u32 old_rx_mode = tp->rx_mode;
1953         u32 old_tx_mode = tp->tx_mode;
1954
1955         if (tg3_flag(tp, USE_PHYLIB))
1956                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1957         else
1958                 autoneg = tp->link_config.autoneg;
1959
1960         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1961                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1962                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963                 else
1964                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965         } else
1966                 flowctrl = tp->link_config.flowctrl;
1967
1968         tp->link_config.active_flowctrl = flowctrl;
1969
1970         if (flowctrl & FLOW_CTRL_RX)
1971                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972         else
1973                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1974
1975         if (old_rx_mode != tp->rx_mode)
1976                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1977
1978         if (flowctrl & FLOW_CTRL_TX)
1979                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980         else
1981                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982
1983         if (old_tx_mode != tp->tx_mode)
1984                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1985 }
1986
/* phylib link-change callback.  Recomputes MAC port mode, duplex,
 * flow control and TX timing registers from the PHY's reported state
 * under tp->lock, then logs a link message (outside the lock) if
 * anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current mode with port-mode/duplex bits cleared. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Pick MII vs GMII port mode from the negotiated speed;
		 * non-gigabit speeds on 5785 use MII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when it actually changes. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit uses a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link, speed, duplex or flow control changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log after dropping the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
2070
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach tg3_adjust_link() as the link-change handler, and
 * mask the PHY's supported feature set down to what the MAC supports
 * for the given interface mode.  Returns 0 on success or a negative
 * errno (PTR_ERR from phy_connect, or -EINVAL for an unsupported
 * interface mode).
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	/* Already connected - nothing to do. */
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only parts drop to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything the masked feature set allows. */
	phydev->advertising = phydev->supported;

	return 0;
}
2118
2119 static void tg3_phy_start(struct tg3 *tp)
2120 {
2121         struct phy_device *phydev;
2122
2123         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124                 return;
2125
2126         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2127
2128         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2130                 phydev->speed = tp->link_config.speed;
2131                 phydev->duplex = tp->link_config.duplex;
2132                 phydev->autoneg = tp->link_config.autoneg;
2133                 phydev->advertising = tp->link_config.advertising;
2134         }
2135
2136         phy_start(phydev);
2137
2138         phy_start_aneg(phydev);
2139 }
2140
2141 static void tg3_phy_stop(struct tg3 *tp)
2142 {
2143         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144                 return;
2145
2146         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2147 }
2148
2149 static void tg3_phy_fini(struct tg3 *tp)
2150 {
2151         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2153                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154         }
2155 }
2156
2157 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2158 {
2159         int err;
2160         u32 val;
2161
2162         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163                 return 0;
2164
2165         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2166                 /* Cannot do read-modify-write on 5401 */
2167                 err = tg3_phy_auxctl_write(tp,
2168                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2169                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2170                                            0x4c20);
2171                 goto done;
2172         }
2173
2174         err = tg3_phy_auxctl_read(tp,
2175                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2176         if (err)
2177                 return err;
2178
2179         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2180         err = tg3_phy_auxctl_write(tp,
2181                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2182
2183 done:
2184         return err;
2185 }
2186
2187 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2188 {
2189         u32 phytest;
2190
2191         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192                 u32 phy;
2193
2194                 tg3_writephy(tp, MII_TG3_FET_TEST,
2195                              phytest | MII_TG3_FET_SHADOW_EN);
2196                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2197                         if (enable)
2198                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199                         else
2200                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2201                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2202                 }
2203                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2204         }
2205 }
2206
/* Enable or disable the PHY's Auto Power-Down feature via the MISC
 * shadow registers.  Skipped on pre-5705 parts and on 5717+ parts
 * with an MII serdes PHY; FET PHYs are handled by the dedicated
 * shadow-window helper instead.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program the SCR5 shadow register; the DLL APD bit is set
	 * except on 5784 parts when APD is being enabled.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD shadow register with an 84ms wake timer,
	 * setting the enable bit only when APD is requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2241
/* Enable or disable automatic MDI/MDI-X crossover.  Skipped on
 * pre-5705 parts and serdes PHYs.  FET PHYs use the shadow-window
 * MISCCTRL register; other PHYs use the auxctl MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* Open the shadow window, flip the MDIX bit, then
		 * restore the original test register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write the auxctl MISC shadow register. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2282
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285         int ret;
2286         u32 val;
2287
2288         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289                 return;
2290
2291         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292         if (!ret)
2293                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296
/* Apply one-time-programmable (OTP) calibration values cached in
 * tp->phy_otp to the PHY DSP registers.  No-op when no OTP data is
 * present or the SMDSP auxctl enable fails.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP access requires the SMDSP auxctl window to be open. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	/* AGC target. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude trim. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offset trims. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2333
/* Read the current EEE state from the PHY (clause-45 registers) and
 * CPMU into *eee, or into the driver's cached tp->eee when eee is
 * NULL.  No-op unless the PHY is EEE-capable; bails out early on any
 * PHY read failure, leaving later fields untouched.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2373
/* Adjust EEE state after a link change.  When the link came up via
 * autoneg at full-duplex 100/1000 Mbps, program the LPI exit timer
 * and arm tp->setlpicnt if EEE is active on the link; otherwise
 * clear the DSP TAP26 register and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Refresh the cached EEE state (tp->eee). */
		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear TAP26 and turn off LPI. */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2413
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2415 {
2416         u32 val;
2417
2418         if (tp->link_config.active_speed == SPEED_1000 &&
2419             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421              tg3_flag(tp, 57765_CLASS)) &&
2422             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423                 val = MII_TG3_DSP_TAP26_ALNOKO |
2424                       MII_TG3_DSP_TAP26_RMRXSTO;
2425                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2427         }
2428
2429         val = tr32(TG3_CPMU_EEE_MODE);
2430         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2431 }
2432
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2434 {
2435         int limit = 100;
2436
2437         while (limit--) {
2438                 u32 tmp32;
2439
2440                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441                         if ((tmp32 & 0x1000) == 0)
2442                                 break;
2443                 }
2444         }
2445         if (limit < 0)
2446                 return -EBUSY;
2447
2448         return 0;
2449 }
2450
/* Write known test patterns into each of the four PHY DSP channels
 * and read them back to verify the DSP macro interface works.
 * Returns 0 on success, -EBUSY on a macro timeout (which also sets
 * *resetp so the caller retries after another PHY reset) or on a
 * readback mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test words: pairs of (low 15 bits, high 4 bits). */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare word pairs against the pattern. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write recovery values to DSP
				 * address 0xb before bailing out.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2516
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2518 {
2519         int chan;
2520
2521         for (chan = 0; chan < 4; chan++) {
2522                 int i;
2523
2524                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525                              (chan * 0x2000) | 0x0200);
2526                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527                 for (i = 0; i < 6; i++)
2528                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530                 if (tg3_wait_macro_done(tp))
2531                         return -EBUSY;
2532         }
2533
2534         return 0;
2535 }
2536
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539         u32 reg32, phy9_orig;
2540         int retries, do_phy_reset, err;
2541
2542         retries = 10;
2543         do_phy_reset = 1;
2544         do {
2545                 if (do_phy_reset) {
2546                         err = tg3_bmcr_reset(tp);
2547                         if (err)
2548                                 return err;
2549                         do_phy_reset = 0;
2550                 }
2551
2552                 /* Disable transmitter and interrupt.  */
2553                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554                         continue;
2555
2556                 reg32 |= 0x3000;
2557                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558
2559                 /* Set full-duplex, 1000 mbps.  */
2560                 tg3_writephy(tp, MII_BMCR,
2561                              BMCR_FULLDPLX | BMCR_SPEED1000);
2562
2563                 /* Set to master mode.  */
2564                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565                         continue;
2566
2567                 tg3_writephy(tp, MII_CTRL1000,
2568                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569
2570                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571                 if (err)
2572                         return err;
2573
2574                 /* Block the PHY control access.  */
2575                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2576
2577                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578                 if (!err)
2579                         break;
2580         } while (--retries);
2581
2582         err = tg3_phy_reset_chanpat(tp);
2583         if (err)
2584                 return err;
2585
2586         tg3_phydsp_write(tp, 0x8005, 0x0000);
2587
2588         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590
2591         tg3_phy_toggle_auxctl_smdsp(tp, false);
2592
2593         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594
2595         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2596                 reg32 &= ~0x3000;
2597                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2598         } else if (!err)
2599                 err = -EBUSY;
2600
2601         return err;
2602 }
2603
/* Mark the link as down: clear the net-core carrier state and the
 * driver's cached link flag so both views stay in sync.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2609
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2611 {
2612         if (tg3_flag(tp, ENABLE_ASF))
2613                 netdev_warn(tp->dev,
2614                             "Management side-band traffic will be interrupted during phy settings change\n");
2615 }
2616
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Bring the 5906 internal PHY out of IDDQ power-down
		 * before attempting any MII access.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice; NOTE(review): presumably to flush latched
	 * status bits — confirm against the PHY datasheet.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report the carrier loss before resetting, if the netdev is up. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the dedicated reset/test-pattern sequence. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB-RX-only CPMU mode around the
		 * BMCR reset; it is restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the CPMU mode saved above. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Drop the 12.5MHz 1000MB MAC clock selection if set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* PHY-bug-specific DSP workarounds, applied after either reset path. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2760
2761 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2763 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2764                                           TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769          (TG3_GPIO_MSG_DRVR_PRES << 12))
2770
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775          (TG3_GPIO_MSG_NEED_VAUX << 12))
2776
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2778 {
2779         u32 status, shift;
2780
2781         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782             tg3_asic_rev(tp) == ASIC_REV_5719)
2783                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2784         else
2785                 status = tr32(TG3_CPMU_DRV_STATUS);
2786
2787         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788         status &= ~(TG3_GPIO_MSG_MASK << shift);
2789         status |= (newstat << shift);
2790
2791         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792             tg3_asic_rev(tp) == ASIC_REV_5719)
2793                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2794         else
2795                 tw32(TG3_CPMU_DRV_STATUS, status);
2796
2797         return status >> TG3_APE_GPIO_MSG_SHIFT;
2798 }
2799
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2801 {
2802         if (!tg3_flag(tp, IS_NIC))
2803                 return 0;
2804
2805         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807             tg3_asic_rev(tp) == ASIC_REV_5720) {
2808                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2809                         return -EIO;
2810
2811                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2812
2813                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2815
2816                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2817         } else {
2818                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2820         }
2821
2822         return 0;
2823 }
2824
/* Remain on Vmain power.  Only NIC-form-factor boards newer than
 * 5700/5701 need the GPIO1 pulse sequence below.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	/* Pulse GPIO1: drive high, low, then high again, waiting
	 * TG3_GRC_LCLCTL_PWRSW_DELAY after each step.
	 */
	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2848
/* Switch the board's power source to auxiliary power (Vaux) by
 * sequencing the GRC local-control GPIOs.  The required GPIO waveform
 * differs per chip/board variant; every step waits
 * TG3_GRC_LCLCTL_PWRSW_DELAY.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: single write driving GPIO 0-2 outputs. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2925
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2927 {
2928         u32 msg = 0;
2929
2930         /* Serialize power state transitions */
2931         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2932                 return;
2933
2934         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935                 msg = TG3_GPIO_MSG_NEED_VAUX;
2936
2937         msg = tg3_set_function_status(tp, msg);
2938
2939         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2940                 goto done;
2941
2942         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943                 tg3_pwrsrc_switch_to_vaux(tp);
2944         else
2945                 tg3_pwrsrc_die_with_vmain(tp);
2946
2947 done:
2948         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2949 }
2950
/* Decide whether this device (or its peer function on dual-port
 * boards) needs auxiliary power, and switch the power source
 * accordingly.  @include_wol: whether an enabled WoL counts as a
 * reason to require Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	/* 5717/5719/5720 coordinate through the shared function-status
	 * word rather than by inspecting the PCI peer device.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* If the peer is fully initialized, leave the
			 * power source alone — NOTE(review): presumably
			 * the peer's driver manages it; confirm.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2994
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2996 {
2997         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2998                 return 1;
2999         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000                 if (speed != SPEED_10)
3001                         return 1;
3002         } else if (speed == SPEED_10)
3003                 return 1;
3004
3005         return 0;
3006 }
3007
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3009 {
3010         switch (tg3_asic_rev(tp)) {
3011         case ASIC_REV_5700:
3012         case ASIC_REV_5704:
3013                 return true;
3014         case ASIC_REV_5780:
3015                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3016                         return true;
3017                 return false;
3018         case ASIC_REV_5717:
3019                 if (!tp->pci_fn)
3020                         return true;
3021                 return false;
3022         case ASIC_REV_5719:
3023         case ASIC_REV_5720:
3024                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3025                     !tp->pci_fn)
3026                         return true;
3027                 return false;
3028         }
3029
3030         return false;
3031 }
3032
/* Power down the PHY (or its serdes equivalent) ahead of a low-power
 * transition.  @do_low_power selects the deeper AUXCTL power
 * programming on non-FET copper PHYs.  No-op when the link must stay
 * up (TG3_PHYFLG_KEEP_LINK_ON_PWRDN).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 internal PHY: reset it, then park it in IDDQ
		 * power-down via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open the shadow-register window to set the
			 * standby-power-down bit in AUXMODE4, then
			 * restore the original TEST register value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Select the 12.5MHz 1000MB MAC clock before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3105
/* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore.  The lock nests:
 * only the first holder (nvram_lock_cnt == 0) requests the hardware
 * grant, polling up to 8000 * 20us (~160ms).  Returns 0 on success or
 * -ENODEV if the grant never arrives (the request is then withdrawn).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out: drop the request so the
				 * arbiter is not left pending.
				 */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
3128
3129 /* tp->lock is held. */
3130 static void tg3_nvram_unlock(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, NVRAM)) {
3133                 if (tp->nvram_lock_cnt > 0)
3134                         tp->nvram_lock_cnt--;
3135                 if (tp->nvram_lock_cnt == 0)
3136                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3137         }
3138 }
3139
3140 /* tp->lock is held. */
3141 static void tg3_enable_nvram_access(struct tg3 *tp)
3142 {
3143         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3144                 u32 nvaccess = tr32(NVRAM_ACCESS);
3145
3146                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3147         }
3148 }
3149
3150 /* tp->lock is held. */
3151 static void tg3_disable_nvram_access(struct tg3 *tp)
3152 {
3153         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154                 u32 nvaccess = tr32(NVRAM_ACCESS);
3155
3156                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3157         }
3158 }
3159
/* Read one 32-bit word from the legacy SEEPROM interface (used when
 * the NVRAM flag is not set).  @offset must be dword aligned and
 * within EEPROM_ADDR_ADDR_MASK.  Polls up to 1000 x 1ms for
 * completion.  Returns 0 on success, -EINVAL on a bad offset, -EBUSY
 * on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address/devid/read fields, then start a read
	 * transaction at @offset.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3199
3200 #define NVRAM_CMD_TIMEOUT 10000
3201
/* Issue @nvram_cmd to the NVRAM controller and busy-wait for the DONE
 * bit, polling in 10us steps for up to NVRAM_CMD_TIMEOUT iterations.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra 10us after DONE — NOTE(review):
			 * presumably hardware settling time; confirm.
			 */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3220
3221 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3222 {
3223         if (tg3_flag(tp, NVRAM) &&
3224             tg3_flag(tp, NVRAM_BUFFERED) &&
3225             tg3_flag(tp, FLASH) &&
3226             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3227             (tp->nvram_jedecnum == JEDEC_ATMEL))
3228
3229                 addr = ((addr / tp->nvram_pagesize) <<
3230                         ATMEL_AT45DB0X1B_PAGE_POS) +
3231                        (addr % tp->nvram_pagesize);
3232
3233         return addr;
3234 }
3235
3236 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3237 {
3238         if (tg3_flag(tp, NVRAM) &&
3239             tg3_flag(tp, NVRAM_BUFFERED) &&
3240             tg3_flag(tp, FLASH) &&
3241             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3242             (tp->nvram_jedecnum == JEDEC_ATMEL))
3243
3244                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3245                         tp->nvram_pagesize) +
3246                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3247
3248         return addr;
3249 }
3250
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word at byte @offset.  Falls back to the SEEPROM
 * path when the NVRAM flag is unset; otherwise takes the NVRAM
 * arbitration lock and enables access around the command.  Returns 0
 * on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3288
3289 /* Ensures NVRAM data is in bytestream format. */
3290 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3291 {
3292         u32 v;
3293         int res = tg3_nvram_read(tp, offset, &v);
3294         if (!res)
3295                 *val = cpu_to_be32(v);
3296         return res;
3297 }
3298
/* Write @len bytes (a multiple of 4) from @buf to the SEEPROM starting
 * at @offset, one 32-bit word at a time, polling each word (up to
 * 1000 x 1ms) for completion.  Returns 0 on success or -EBUSY if a
 * word never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first — NOTE(review): presumably
		 * write-1-to-clear to ack stale status; confirm.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Start the write transaction at @addr. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3347
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3350                 u8 *buf)
3351 {
3352         int ret = 0;
3353         u32 pagesize = tp->nvram_pagesize;
3354         u32 pagemask = pagesize - 1;
3355         u32 nvram_cmd;
3356         u8 *tmp;
3357
3358         tmp = kmalloc(pagesize, GFP_KERNEL);
3359         if (tmp == NULL)
3360                 return -ENOMEM;
3361
3362         while (len) {
3363                 int j;
3364                 u32 phy_addr, page_off, size;
3365
3366                 phy_addr = offset & ~pagemask;
3367
3368                 for (j = 0; j < pagesize; j += 4) {
3369                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3370                                                   (__be32 *) (tmp + j));
3371                         if (ret)
3372                                 break;
3373                 }
3374                 if (ret)
3375                         break;
3376
3377                 page_off = offset & pagemask;
3378                 size = pagesize;
3379                 if (len < size)
3380                         size = len;
3381
3382                 len -= size;
3383
3384                 memcpy(tmp + page_off, buf, size);
3385
3386                 offset = offset + (pagesize - page_off);
3387
3388                 tg3_enable_nvram_access(tp);
3389
3390                 /*
3391                  * Before we can erase the flash page, we need
3392                  * to issue a special "write enable" command.
3393                  */
3394                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3395
3396                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3397                         break;
3398
3399                 /* Erase the target page */
3400                 tw32(NVRAM_ADDR, phy_addr);
3401
3402                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3403                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3404
3405                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3406                         break;
3407
3408                 /* Issue another write enable to start the write. */
3409                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3410
3411                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3412                         break;
3413
3414                 for (j = 0; j < pagesize; j += 4) {
3415                         __be32 data;
3416
3417                         data = *((__be32 *) (tmp + j));
3418
3419                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3420
3421                         tw32(NVRAM_ADDR, phy_addr + j);
3422
3423                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3424                                 NVRAM_CMD_WR;
3425
3426                         if (j == 0)
3427                                 nvram_cmd |= NVRAM_CMD_FIRST;
3428                         else if (j == (pagesize - 4))
3429                                 nvram_cmd |= NVRAM_CMD_LAST;
3430
3431                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3432                         if (ret)
3433                                 break;
3434                 }
3435                 if (ret)
3436                         break;
3437         }
3438
3439         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440         tg3_nvram_exec_cmd(tp, nvram_cmd);
3441
3442         kfree(tmp);
3443
3444         return ret;
3445 }
3446
3447 /* offset and length are dword aligned */
3448 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3449                 u8 *buf)
3450 {
3451         int i, ret = 0;
3452
3453         for (i = 0; i < len; i += 4, offset += 4) {
3454                 u32 page_off, phy_addr, nvram_cmd;
3455                 __be32 data;
3456
3457                 memcpy(&data, buf + i, 4);
3458                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3459
3460                 page_off = offset % tp->nvram_pagesize;
3461
3462                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3463
3464                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3465
3466                 if (page_off == 0 || i == 0)
3467                         nvram_cmd |= NVRAM_CMD_FIRST;
3468                 if (page_off == (tp->nvram_pagesize - 4))
3469                         nvram_cmd |= NVRAM_CMD_LAST;
3470
3471                 if (i == (len - 4))
3472                         nvram_cmd |= NVRAM_CMD_LAST;
3473
3474                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3475                     !tg3_flag(tp, FLASH) ||
3476                     !tg3_flag(tp, 57765_PLUS))
3477                         tw32(NVRAM_ADDR, phy_addr);
3478
3479                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3480                     !tg3_flag(tp, 5755_PLUS) &&
3481                     (tp->nvram_jedecnum == JEDEC_ST) &&
3482                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3483                         u32 cmd;
3484
3485                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3486                         ret = tg3_nvram_exec_cmd(tp, cmd);
3487                         if (ret)
3488                                 break;
3489                 }
3490                 if (!tg3_flag(tp, FLASH)) {
3491                         /* We always do complete word writes to eeprom. */
3492                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3493                 }
3494
3495                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3496                 if (ret)
3497                         break;
3498         }
3499         return ret;
3500 }
3501
3502 /* offset and length are dword aligned */
3503 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3504 {
3505         int ret;
3506
3507         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3508                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3509                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3510                 udelay(40);
3511         }
3512
3513         if (!tg3_flag(tp, NVRAM)) {
3514                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3515         } else {
3516                 u32 grc_mode;
3517
3518                 ret = tg3_nvram_lock(tp);
3519                 if (ret)
3520                         return ret;
3521
3522                 tg3_enable_nvram_access(tp);
3523                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3524                         tw32(NVRAM_WRITE1, 0x406);
3525
3526                 grc_mode = tr32(GRC_MODE);
3527                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3528
3529                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3530                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3531                                 buf);
3532                 } else {
3533                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3534                                 buf);
3535                 }
3536
3537                 grc_mode = tr32(GRC_MODE);
3538                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3539
3540                 tg3_disable_nvram_access(tp);
3541                 tg3_nvram_unlock(tp);
3542         }
3543
3544         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3545                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3546                 udelay(40);
3547         }
3548
3549         return ret;
3550 }
3551
3552 #define RX_CPU_SCRATCH_BASE     0x30000
3553 #define RX_CPU_SCRATCH_SIZE     0x04000
3554 #define TX_CPU_SCRATCH_BASE     0x34000
3555 #define TX_CPU_SCRATCH_SIZE     0x04000
3556
3557 /* tp->lock is held. */
3558 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3559 {
3560         int i;
3561         const int iters = 10000;
3562
3563         for (i = 0; i < iters; i++) {
3564                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3565                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3566                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3567                         break;
3568                 if (pci_channel_offline(tp->pdev))
3569                         return -EBUSY;
3570         }
3571
3572         return (i == iters) ? -EBUSY : 0;
3573 }
3574
3575 /* tp->lock is held. */
3576 static int tg3_rxcpu_pause(struct tg3 *tp)
3577 {
3578         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3579
3580         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3581         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3582         udelay(10);
3583
3584         return rc;
3585 }
3586
3587 /* tp->lock is held. */
3588 static int tg3_txcpu_pause(struct tg3 *tp)
3589 {
3590         return tg3_pause_cpu(tp, TX_CPU_BASE);
3591 }
3592
3593 /* tp->lock is held. */
3594 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596         tw32(cpu_base + CPU_STATE, 0xffffffff);
3597         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3598 }
3599
3600 /* tp->lock is held. */
3601 static void tg3_rxcpu_resume(struct tg3 *tp)
3602 {
3603         tg3_resume_cpu(tp, RX_CPU_BASE);
3604 }
3605
3606 /* tp->lock is held. */
3607 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3608 {
3609         int rc;
3610
3611         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3612
3613         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3614                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3615
3616                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3617                 return 0;
3618         }
3619         if (cpu_base == RX_CPU_BASE) {
3620                 rc = tg3_rxcpu_pause(tp);
3621         } else {
3622                 /*
3623                  * There is only an Rx CPU for the 5750 derivative in the
3624                  * BCM4785.
3625                  */
3626                 if (tg3_flag(tp, IS_SSB_CORE))
3627                         return 0;
3628
3629                 rc = tg3_txcpu_pause(tp);
3630         }
3631
3632         if (rc) {
3633                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3634                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3635                 return -ENODEV;
3636         }
3637
3638         /* Clear firmware's nvram arbitration. */
3639         if (tg3_flag(tp, NVRAM))
3640                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3641         return 0;
3642 }
3643
3644 static int tg3_fw_data_len(struct tg3 *tp,
3645                            const struct tg3_firmware_hdr *fw_hdr)
3646 {
3647         int fw_len;
3648
3649         /* Non fragmented firmware have one firmware header followed by a
3650          * contiguous chunk of data to be written. The length field in that
3651          * header is not the length of data to be written but the complete
3652          * length of the bss. The data length is determined based on
3653          * tp->fw->size minus headers.
3654          *
3655          * Fragmented firmware have a main header followed by multiple
3656          * fragments. Each fragment is identical to non fragmented firmware
3657          * with a firmware header followed by a contiguous chunk of data. In
3658          * the main header, the length field is unused and set to 0xffffffff.
3659          * In each fragment header the length is the entire size of that
3660          * fragment i.e. fragment data + header length. Data length is
3661          * therefore length field in the header minus TG3_FW_HDR_LEN.
3662          */
3663         if (tp->fw_len == 0xffffffff)
3664                 fw_len = be32_to_cpu(fw_hdr->len);
3665         else
3666                 fw_len = tp->fw->size;
3667
3668         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3669 }
3670
3671 /* tp->lock is held. */
3672 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3673                                  u32 cpu_scratch_base, int cpu_scratch_size,
3674                                  const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676         int err, i;
3677         void (*write_op)(struct tg3 *, u32, u32);
3678         int total_len = tp->fw->size;
3679
3680         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3681                 netdev_err(tp->dev,
3682                            "%s: Trying to load TX cpu firmware which is 5705\n",
3683                            __func__);
3684                 return -EINVAL;
3685         }
3686
3687         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3688                 write_op = tg3_write_mem;
3689         else
3690                 write_op = tg3_write_indirect_reg32;
3691
3692         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3693                 /* It is possible that bootcode is still loading at this point.
3694                  * Get the nvram lock first before halting the cpu.
3695                  */
3696                 int lock_err = tg3_nvram_lock(tp);
3697                 err = tg3_halt_cpu(tp, cpu_base);
3698                 if (!lock_err)
3699                         tg3_nvram_unlock(tp);
3700                 if (err)
3701                         goto out;
3702
3703                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3704                         write_op(tp, cpu_scratch_base + i, 0);
3705                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3706                 tw32(cpu_base + CPU_MODE,
3707                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3708         } else {
3709                 /* Subtract additional main header for fragmented firmware and
3710                  * advance to the first fragment
3711                  */
3712                 total_len -= TG3_FW_HDR_LEN;
3713                 fw_hdr++;
3714         }
3715
3716         do {
3717                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3718                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3719                         write_op(tp, cpu_scratch_base +
3720                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3721                                      (i * sizeof(u32)),
3722                                  be32_to_cpu(fw_data[i]));
3723
3724                 total_len -= be32_to_cpu(fw_hdr->len);
3725
3726                 /* Advance to next fragment */
3727                 fw_hdr = (struct tg3_firmware_hdr *)
3728                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3729         } while (total_len > 0);
3730
3731         err = 0;
3732
3733 out:
3734         return err;
3735 }
3736
3737 /* tp->lock is held. */
3738 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3739 {
3740         int i;
3741         const int iters = 5;
3742
3743         tw32(cpu_base + CPU_STATE, 0xffffffff);
3744         tw32_f(cpu_base + CPU_PC, pc);
3745
3746         for (i = 0; i < iters; i++) {
3747                 if (tr32(cpu_base + CPU_PC) == pc)
3748                         break;
3749                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3750                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3751                 tw32_f(cpu_base + CPU_PC, pc);
3752                 udelay(1000);
3753         }
3754
3755         return (i == iters) ? -EBUSY : 0;
3756 }
3757
3758 /* tp->lock is held. */
3759 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3760 {
3761         const struct tg3_firmware_hdr *fw_hdr;
3762         int err;
3763
3764         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3765
3766         /* Firmware blob starts with version numbers, followed by
3767            start address and length. We are setting complete length.
3768            length = end_address_of_bss - start_address_of_text.
3769            Remainder is the blob to be loaded contiguously
3770            from start address. */
3771
3772         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3773                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3774                                     fw_hdr);
3775         if (err)
3776                 return err;
3777
3778         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3779                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3780                                     fw_hdr);
3781         if (err)
3782                 return err;
3783
3784         /* Now startup only the RX cpu. */
3785         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3786                                        be32_to_cpu(fw_hdr->base_addr));
3787         if (err) {
3788                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3789                            "should be %08x\n", __func__,
3790                            tr32(RX_CPU_BASE + CPU_PC),
3791                                 be32_to_cpu(fw_hdr->base_addr));
3792                 return -ENODEV;
3793         }
3794
3795         tg3_rxcpu_resume(tp);
3796
3797         return 0;
3798 }
3799
3800 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3801 {
3802         const int iters = 1000;
3803         int i;
3804         u32 val;
3805
3806         /* Wait for boot code to complete initialization and enter service
3807          * loop. It is then safe to download service patches
3808          */
3809         for (i = 0; i < iters; i++) {
3810                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3811                         break;
3812
3813                 udelay(10);
3814         }
3815
3816         if (i == iters) {
3817                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3818                 return -EBUSY;
3819         }
3820
3821         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3822         if (val & 0xff) {
3823                 netdev_warn(tp->dev,
3824                             "Other patches exist. Not downloading EEE patch\n");
3825                 return -EEXIST;
3826         }
3827
3828         return 0;
3829 }
3830
3831 /* tp->lock is held. */
3832 static void tg3_load_57766_firmware(struct tg3 *tp)
3833 {
3834         struct tg3_firmware_hdr *fw_hdr;
3835
3836         if (!tg3_flag(tp, NO_NVRAM))
3837                 return;
3838
3839         if (tg3_validate_rxcpu_state(tp))
3840                 return;
3841
3842         if (!tp->fw)
3843                 return;
3844
3845         /* This firmware blob has a different format than older firmware
3846          * releases as given below. The main difference is we have fragmented
3847          * data to be written to non-contiguous locations.
3848          *
3849          * In the beginning we have a firmware header identical to other
3850          * firmware which consists of version, base addr and length. The length
3851          * here is unused and set to 0xffffffff.
3852          *
3853          * This is followed by a series of firmware fragments which are
3854          * individually identical to previous firmware. i.e. they have the
3855          * firmware header and followed by data for that fragment. The version
3856          * field of the individual fragment header is unused.
3857          */
3858
3859         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3860         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3861                 return;
3862
3863         if (tg3_rxcpu_pause(tp))
3864                 return;
3865
3866         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3867         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3868
3869         tg3_rxcpu_resume(tp);
3870 }
3871
3872 /* tp->lock is held. */
3873 static int tg3_load_tso_firmware(struct tg3 *tp)
3874 {
3875         const struct tg3_firmware_hdr *fw_hdr;
3876         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3877         int err;
3878
3879         if (!tg3_flag(tp, FW_TSO))
3880                 return 0;
3881
3882         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883
3884         /* Firmware blob starts with version numbers, followed by
3885            start address and length. We are setting complete length.
3886            length = end_address_of_bss - start_address_of_text.
3887            Remainder is the blob to be loaded contiguously
3888            from start address. */
3889
3890         cpu_scratch_size = tp->fw_len;
3891
3892         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3893                 cpu_base = RX_CPU_BASE;
3894                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3895         } else {
3896                 cpu_base = TX_CPU_BASE;
3897                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3898                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3899         }
3900
3901         err = tg3_load_firmware_cpu(tp, cpu_base,
3902                                     cpu_scratch_base, cpu_scratch_size,
3903                                     fw_hdr);
3904         if (err)
3905                 return err;
3906
3907         /* Now startup the cpu. */
3908         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3909                                        be32_to_cpu(fw_hdr->base_addr));
3910         if (err) {
3911                 netdev_err(tp->dev,
3912                            "%s fails to set CPU PC, is %08x should be %08x\n",
3913                            __func__, tr32(cpu_base + CPU_PC),
3914                            be32_to_cpu(fw_hdr->base_addr));
3915                 return -ENODEV;
3916         }
3917
3918         tg3_resume_cpu(tp, cpu_base);
3919         return 0;
3920 }
3921
3922
3923 /* tp->lock is held. */
3924 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3925 {
3926         u32 addr_high, addr_low;
3927         int i;
3928
3929         addr_high = ((tp->dev->dev_addr[0] << 8) |
3930                      tp->dev->dev_addr[1]);
3931         addr_low = ((tp->dev->dev_addr[2] << 24) |
3932                     (tp->dev->dev_addr[3] << 16) |
3933                     (tp->dev->dev_addr[4] <<  8) |
3934                     (tp->dev->dev_addr[5] <<  0));
3935         for (i = 0; i < 4; i++) {
3936                 if (i == 1 && skip_mac_1)
3937                         continue;
3938                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3939                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3940         }
3941
3942         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3943             tg3_asic_rev(tp) == ASIC_REV_5704) {
3944                 for (i = 0; i < 12; i++) {
3945                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3946                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3947                 }
3948         }
3949
3950         addr_high = (tp->dev->dev_addr[0] +
3951                      tp->dev->dev_addr[1] +
3952                      tp->dev->dev_addr[2] +
3953                      tp->dev->dev_addr[3] +
3954                      tp->dev->dev_addr[4] +
3955                      tp->dev->dev_addr[5]) &
3956                 TX_BACKOFF_SEED_MASK;
3957         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3958 }
3959
3960 static void tg3_enable_register_access(struct tg3 *tp)
3961 {
3962         /*
3963          * Make sure register accesses (indirect or otherwise) will function
3964          * correctly.
3965          */
3966         pci_write_config_dword(tp->pdev,
3967                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3968 }
3969
3970 static int tg3_power_up(struct tg3 *tp)
3971 {
3972         int err;
3973
3974         tg3_enable_register_access(tp);
3975
3976         err = pci_set_power_state(tp->pdev, PCI_D0);
3977         if (!err) {
3978                 /* Switch out of Vaux if it is a NIC */
3979                 tg3_pwrsrc_switch_to_vmain(tp);
3980         } else {
3981                 netdev_err(tp->dev, "Transition to D0 failed\n");
3982         }
3983
3984         return err;
3985 }
3986
3987 static int tg3_setup_phy(struct tg3 *, bool);
3988
3989 static int tg3_power_down_prepare(struct tg3 *tp)
3990 {
3991         u32 misc_host_ctrl;
3992         bool device_should_wake, do_low_power;
3993
3994         tg3_enable_register_access(tp);
3995
3996         /* Restore the CLKREQ setting. */
3997         if (tg3_flag(tp, CLKREQ_BUG))
3998                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3999                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4000
4001         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4002         tw32(TG3PCI_MISC_HOST_CTRL,
4003              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4004
4005         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4006                              tg3_flag(tp, WOL_ENABLE);
4007
4008         if (tg3_flag(tp, USE_PHYLIB)) {
4009                 do_low_power = false;
4010                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4011                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4012                         struct phy_device *phydev;
4013                         u32 phyid, advertising;
4014
4015                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4016
4017                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4018
4019                         tp->link_config.speed = phydev->speed;
4020                         tp->link_config.duplex = phydev->duplex;
4021                         tp->link_config.autoneg = phydev->autoneg;
4022                         tp->link_config.advertising = phydev->advertising;
4023
4024                         advertising = ADVERTISED_TP |
4025                                       ADVERTISED_Pause |
4026                                       ADVERTISED_Autoneg |
4027                                       ADVERTISED_10baseT_Half;
4028
4029                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4030                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4031                                         advertising |=
4032                                                 ADVERTISED_100baseT_Half |
4033                                                 ADVERTISED_100baseT_Full |
4034                                                 ADVERTISED_10baseT_Full;
4035                                 else
4036                                         advertising |= ADVERTISED_10baseT_Full;
4037                         }
4038
4039                         phydev->advertising = advertising;
4040
4041                         phy_start_aneg(phydev);
4042
4043                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4044                         if (phyid != PHY_ID_BCMAC131) {
4045                                 phyid &= PHY_BCM_OUI_MASK;
4046                                 if (phyid == PHY_BCM_OUI_1 ||
4047                                     phyid == PHY_BCM_OUI_2 ||
4048                                     phyid == PHY_BCM_OUI_3)
4049                                         do_low_power = true;
4050                         }
4051                 }
4052         } else {
4053                 do_low_power = true;
4054
4055                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4056                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4057
4058                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4059                         tg3_setup_phy(tp, false);
4060         }
4061
4062         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4063                 u32 val;
4064
4065                 val = tr32(GRC_VCPU_EXT_CTRL);
4066                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4067         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4068                 int i;
4069                 u32 val;
4070
4071                 for (i = 0; i < 200; i++) {
4072                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4073                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4074                                 break;
4075                         msleep(1);
4076                 }
4077         }
4078         if (tg3_flag(tp, WOL_CAP))
4079                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4080                                                      WOL_DRV_STATE_SHUTDOWN |
4081                                                      WOL_DRV_WOL |
4082                                                      WOL_SET_MAGIC_PKT);
4083
4084         if (device_should_wake) {
4085                 u32 mac_mode;
4086
4087                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4088                         if (do_low_power &&
4089                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4090                                 tg3_phy_auxctl_write(tp,
4091                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4092                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4093                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4094                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4095                                 udelay(40);
4096                         }
4097
4098                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4099                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4100                         else if (tp->phy_flags &
4101                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4102                                 if (tp->link_config.active_speed == SPEED_1000)
4103                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4104                                 else
4105                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4106                         } else
4107                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4108
4109                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4110                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4111                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4112                                              SPEED_100 : SPEED_10;
4113                                 if (tg3_5700_link_polarity(tp, speed))
4114                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4115                                 else
4116                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4117                         }
4118                 } else {
4119                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4120                 }
4121
4122                 if (!tg3_flag(tp, 5750_PLUS))
4123                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4124
4125                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4126                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4127                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4128                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4129
4130                 if (tg3_flag(tp, ENABLE_APE))
4131                         mac_mode |= MAC_MODE_APE_TX_EN |
4132                                     MAC_MODE_APE_RX_EN |
4133                                     MAC_MODE_TDE_ENABLE;
4134
4135                 tw32_f(MAC_MODE, mac_mode);
4136                 udelay(100);
4137
4138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4139                 udelay(10);
4140         }
4141
4142         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4143             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4144              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4145                 u32 base_val;
4146
4147                 base_val = tp->pci_clock_ctrl;
4148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4149                              CLOCK_CTRL_TXCLK_DISABLE);
4150
4151                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4152                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4153         } else if (tg3_flag(tp, 5780_CLASS) ||
4154                    tg3_flag(tp, CPMU_PRESENT) ||
4155                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4156                 /* do nothing */
4157         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4158                 u32 newbits1, newbits2;
4159
4160                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4162                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4163                                     CLOCK_CTRL_TXCLK_DISABLE |
4164                                     CLOCK_CTRL_ALTCLK);
4165                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4166                 } else if (tg3_flag(tp, 5705_PLUS)) {
4167                         newbits1 = CLOCK_CTRL_625_CORE;
4168                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4169                 } else {
4170                         newbits1 = CLOCK_CTRL_ALTCLK;
4171                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4172                 }
4173
4174                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4175                             40);
4176
4177                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4178                             40);
4179
4180                 if (!tg3_flag(tp, 5705_PLUS)) {
4181                         u32 newbits3;
4182
4183                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4185                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4186                                             CLOCK_CTRL_TXCLK_DISABLE |
4187                                             CLOCK_CTRL_44MHZ_CORE);
4188                         } else {
4189                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4190                         }
4191
4192                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4193                                     tp->pci_clock_ctrl | newbits3, 40);
4194                 }
4195         }
4196
4197         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4198                 tg3_power_down_phy(tp, do_low_power);
4199
4200         tg3_frob_aux_power(tp, true);
4201
4202         /* Workaround for unstable PLL clock */
4203         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4204             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4205              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4206                 u32 val = tr32(0x7d00);
4207
4208                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4209                 tw32(0x7d00, val);
4210                 if (!tg3_flag(tp, ENABLE_ASF)) {
4211                         int err;
4212
4213                         err = tg3_nvram_lock(tp);
4214                         tg3_halt_cpu(tp, RX_CPU_BASE);
4215                         if (!err)
4216                                 tg3_nvram_unlock(tp);
4217                 }
4218         }
4219
4220         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4221
4222         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4223
4224         return 0;
4225 }
4226
/* Final power-down step: arm PCI wake (only if Wake-on-LAN is enabled)
 * and drop the device into the D3hot power state.  All chip-level
 * shutdown work is done beforehand by tg3_power_down_prepare().
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* PME generation tracks the WOL_ENABLE flag. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4234
4235 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4236 {
4237         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4238         case MII_TG3_AUX_STAT_10HALF:
4239                 *speed = SPEED_10;
4240                 *duplex = DUPLEX_HALF;
4241                 break;
4242
4243         case MII_TG3_AUX_STAT_10FULL:
4244                 *speed = SPEED_10;
4245                 *duplex = DUPLEX_FULL;
4246                 break;
4247
4248         case MII_TG3_AUX_STAT_100HALF:
4249                 *speed = SPEED_100;
4250                 *duplex = DUPLEX_HALF;
4251                 break;
4252
4253         case MII_TG3_AUX_STAT_100FULL:
4254                 *speed = SPEED_100;
4255                 *duplex = DUPLEX_FULL;
4256                 break;
4257
4258         case MII_TG3_AUX_STAT_1000HALF:
4259                 *speed = SPEED_1000;
4260                 *duplex = DUPLEX_HALF;
4261                 break;
4262
4263         case MII_TG3_AUX_STAT_1000FULL:
4264                 *speed = SPEED_1000;
4265                 *duplex = DUPLEX_FULL;
4266                 break;
4267
4268         default:
4269                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4270                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4271                                  SPEED_10;
4272                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4273                                   DUPLEX_HALF;
4274                         break;
4275                 }
4276                 *speed = SPEED_UNKNOWN;
4277                 *duplex = DUPLEX_UNKNOWN;
4278                 break;
4279         }
4280 }
4281
4282 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4283 {
4284         int err = 0;
4285         u32 val, new_adv;
4286
4287         new_adv = ADVERTISE_CSMA;
4288         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4289         new_adv |= mii_advertise_flowctrl(flowctrl);
4290
4291         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4292         if (err)
4293                 goto done;
4294
4295         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4296                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4297
4298                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4299                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4300                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4301
4302                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4303                 if (err)
4304                         goto done;
4305         }
4306
4307         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4308                 goto done;
4309
4310         tw32(TG3_CPMU_EEE_MODE,
4311              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4312
4313         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4314         if (!err) {
4315                 u32 err2;
4316
4317                 val = 0;
4318                 /* Advertise 100-BaseTX EEE ability */
4319                 if (advertise & ADVERTISED_100baseT_Full)
4320                         val |= MDIO_AN_EEE_ADV_100TX;
4321                 /* Advertise 1000-BaseT EEE ability */
4322                 if (advertise & ADVERTISED_1000baseT_Full)
4323                         val |= MDIO_AN_EEE_ADV_1000T;
4324
4325                 if (!tp->eee.eee_enabled) {
4326                         val = 0;
4327                         tp->eee.advertised = 0;
4328                 } else {
4329                         tp->eee.advertised = advertise &
4330                                              (ADVERTISED_100baseT_Full |
4331                                               ADVERTISED_1000baseT_Full);
4332                 }
4333
4334                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4335                 if (err)
4336                         val = 0;
4337
4338                 switch (tg3_asic_rev(tp)) {
4339                 case ASIC_REV_5717:
4340                 case ASIC_REV_57765:
4341                 case ASIC_REV_57766:
4342                 case ASIC_REV_5719:
4343                         /* If we advertised any eee advertisements above... */
4344                         if (val)
4345                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4346                                       MII_TG3_DSP_TAP26_RMRXSTO |
4347                                       MII_TG3_DSP_TAP26_OPCSINPT;
4348                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4349                         /* Fall through */
4350                 case ASIC_REV_5720:
4351                 case ASIC_REV_5762:
4352                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4353                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4354                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4355                 }
4356
4357                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4358                 if (!err)
4359                         err = err2;
4360         }
4361
4362 done:
4363         return err;
4364 }
4365
/* Kick off link bring-up on a copper PHY.
 *
 * With autoneg enabled (or when the PHY is in low-power/WOL mode) this
 * programs the advertisement via tg3_phy_autoneg_cfg() and restarts
 * autonegotiation.  With autoneg disabled it forces the configured
 * speed/duplex in BMCR, first dropping the existing link (via loopback)
 * so the forced settings take effect cleanly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down for WOL: advertise only 10Mb,
			 * plus 100Mb / 1G when the platform allows it.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			/* Normal operation: use the configured mask,
			 * minus gigabit on 10/100-only PHYs.
			 */
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		/* Forced mode: active_* mirrors the requested settings. */
		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced BMCR value (default/unknown => 10Mb). */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms)
			 * for the link to drop before applying the new
			 * forced settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches status; read it twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4459
/* Reconstruct tp->link_config from the PHY's current programming, so the
 * driver can adopt settings left behind (e.g. by firmware) instead of
 * forcing a renegotiation.
 *
 * Returns 0 on success, or a negative errno when a PHY read fails or the
 * forced-mode configuration cannot be represented (-EIO).
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced speed/duplex mode. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Serdes links cannot be forced to 10Mb. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			/* Serdes links cannot be forced to 100Mb. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			/* Both speed bits set, or 1G forced on a
			 * 10/100-only PHY: unrepresentable, keep -EIO.
			 */
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg mode: rebuild the advertising mask from the PHY. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit advertisement is in CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X advertisement (including the
			 * pause bits) lives in the ADVERTISE register.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4556
/* Apply BCM5401 PHY DSP fixups: disable tap power management, set the
 * extended packet length bit, then load vendor-specified DSP coefficient
 * pairs (register/value pairs from Broadcom; exact meaning undocumented).
 *
 * Returns 0 on success; the individual write results are OR-ed together,
 * so any non-zero value indicates at least one failure.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	/* Let the DSP settle before the PHY is used again. */
	udelay(40);

	return err;
}
4575
4576 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4577 {
4578         struct ethtool_eee eee;
4579
4580         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4581                 return true;
4582
4583         tg3_eee_pull_config(tp, &eee);
4584
4585         if (tp->eee.eee_enabled) {
4586                 if (tp->eee.advertised != eee.advertised ||
4587                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4588                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4589                         return false;
4590         } else {
4591                 /* EEE is disabled but we're advertising */
4592                 if (eee.advertised)
4593                         return false;
4594         }
4595
4596         return true;
4597 }
4598
/* Check whether the PHY's current autoneg advertisement matches what
 * tp->link_config requests.  On success *lcladv holds the raw
 * MII_ADVERTISE value for later flow-control resolution.
 *
 * Returns false if a PHY read fails or any relevant advertised bit
 * differs from the requested configuration (renegotiation needed).
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits are only compared on full-duplex links. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 advertise gigabit with the master
			 * bits forced on (see tg3_phy_autoneg_cfg()), so
			 * include those bits in the comparison.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4642
4643 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4644 {
4645         u32 lpeth = 0;
4646
4647         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4648                 u32 val;
4649
4650                 if (tg3_readphy(tp, MII_STAT1000, &val))
4651                         return false;
4652
4653                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4654         }
4655
4656         if (tg3_readphy(tp, MII_LPA, rmtadv))
4657                 return false;
4658
4659         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4660         tp->link_config.rmt_adv = lpeth;
4661
4662         return true;
4663 }
4664
4665 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4666 {
4667         if (curr_link_up != tp->link_up) {
4668                 if (curr_link_up) {
4669                         netif_carrier_on(tp->dev);
4670                 } else {
4671                         netif_carrier_off(tp->dev);
4672                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4673                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4674                 }
4675
4676                 tg3_link_report(tp);
4677                 return true;
4678         }
4679
4680         return false;
4681 }
4682
4683 static void tg3_clear_mac_status(struct tg3 *tp)
4684 {
4685         tw32(MAC_EVENT, 0);
4686
4687         tw32_f(MAC_STATUS,
4688                MAC_STATUS_SYNC_CHANGED |
4689                MAC_STATUS_CFG_CHANGED |
4690                MAC_STATUS_MI_COMPLETION |
4691                MAC_STATUS_LNKSTATE_CHANGED);
4692         udelay(40);
4693 }
4694
/* Program the CPMU EEE (Energy Efficient Ethernet) registers from
 * tp->eee: link-idle detection sources, LPI entry/exit controls, and
 * the debounce timers.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally gates on APE TX empty — presumably a
	 * chip erratum; confirm against Broadcom documentation.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* LPI-in-RX is always allowed; LPI-in-TX only when the user
	 * enabled tx_lpi.
	 */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write zero (everything off) when EEE is disabled. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	/* Debounce timer 1: low 16 bits carry the user's LPI timer. */
	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4730
4731 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4732 {
4733         bool current_link_up;
4734         u32 bmsr, val;
4735         u32 lcl_adv, rmt_adv;
4736         u16 current_speed;
4737         u8 current_duplex;
4738         int i, err;
4739
4740         tg3_clear_mac_status(tp);
4741
4742         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4743                 tw32_f(MAC_MI_MODE,
4744                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4745                 udelay(80);
4746         }
4747
4748         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4749
4750         /* Some third-party PHYs need to be reset on link going
4751          * down.
4752          */
4753         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4754              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4755              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4756             tp->link_up) {
4757                 tg3_readphy(tp, MII_BMSR, &bmsr);
4758                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4759                     !(bmsr & BMSR_LSTATUS))
4760                         force_reset = true;
4761         }
4762         if (force_reset)
4763                 tg3_phy_reset(tp);
4764
4765         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4766                 tg3_readphy(tp, MII_BMSR, &bmsr);
4767                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4768                     !tg3_flag(tp, INIT_COMPLETE))
4769                         bmsr = 0;
4770
4771                 if (!(bmsr & BMSR_LSTATUS)) {
4772                         err = tg3_init_5401phy_dsp(tp);
4773                         if (err)
4774                                 return err;
4775
4776                         tg3_readphy(tp, MII_BMSR, &bmsr);
4777                         for (i = 0; i < 1000; i++) {
4778                                 udelay(10);
4779                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4780                                     (bmsr & BMSR_LSTATUS)) {
4781                                         udelay(40);
4782                                         break;
4783                                 }
4784                         }
4785
4786                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4787                             TG3_PHY_REV_BCM5401_B0 &&
4788                             !(bmsr & BMSR_LSTATUS) &&
4789                             tp->link_config.active_speed == SPEED_1000) {
4790                                 err = tg3_phy_reset(tp);
4791                                 if (!err)
4792                                         err = tg3_init_5401phy_dsp(tp);
4793                                 if (err)
4794                                         return err;
4795                         }
4796                 }
4797         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4798                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4799                 /* 5701 {A0,B0} CRC bug workaround */
4800                 tg3_writephy(tp, 0x15, 0x0a75);
4801                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4802                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4803                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4804         }
4805
4806         /* Clear pending interrupts... */
4807         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4808         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4809
4810         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4811                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4812         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4813                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4814
4815         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4816             tg3_asic_rev(tp) == ASIC_REV_5701) {
4817                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4818                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4819                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4820                 else
4821                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4822         }
4823
4824         current_link_up = false;
4825         current_speed = SPEED_UNKNOWN;
4826         current_duplex = DUPLEX_UNKNOWN;
4827         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4828         tp->link_config.rmt_adv = 0;
4829
4830         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4831                 err = tg3_phy_auxctl_read(tp,
4832                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4833                                           &val);
4834                 if (!err && !(val & (1 << 10))) {
4835                         tg3_phy_auxctl_write(tp,
4836                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4837                                              val | (1 << 10));
4838                         goto relink;
4839                 }
4840         }
4841
4842         bmsr = 0;
4843         for (i = 0; i < 100; i++) {
4844                 tg3_readphy(tp, MII_BMSR, &bmsr);
4845                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4846                     (bmsr & BMSR_LSTATUS))
4847                         break;
4848                 udelay(40);
4849         }
4850
4851         if (bmsr & BMSR_LSTATUS) {
4852                 u32 aux_stat, bmcr;
4853
4854                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4855                 for (i = 0; i < 2000; i++) {
4856                         udelay(10);
4857                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4858                             aux_stat)
4859                                 break;
4860                 }
4861
4862                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4863                                              &current_speed,
4864                                              &current_duplex);
4865
4866                 bmcr = 0;
4867                 for (i = 0; i < 200; i++) {
4868                         tg3_readphy(tp, MII_BMCR, &bmcr);
4869                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4870                                 continue;
4871                         if (bmcr && bmcr != 0x7fff)
4872                                 break;
4873                         udelay(10);
4874                 }
4875
4876                 lcl_adv = 0;
4877                 rmt_adv = 0;
4878
4879                 tp->link_config.active_speed = current_speed;
4880                 tp->link_config.active_duplex = current_duplex;
4881
4882                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4883                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4884
4885                         if ((bmcr & BMCR_ANENABLE) &&
4886                             eee_config_ok &&
4887                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4888                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4889                                 current_link_up = true;
4890
4891                         /* EEE settings changes take effect only after a phy
4892                          * reset.  If we have skipped a reset due to Link Flap
4893                          * Avoidance being enabled, do it now.
4894                          */
4895                         if (!eee_config_ok &&
4896                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4897                             !force_reset) {
4898                                 tg3_setup_eee(tp);
4899                                 tg3_phy_reset(tp);
4900                         }
4901                 } else {
4902                         if (!(bmcr & BMCR_ANENABLE) &&
4903                             tp->link_config.speed == current_speed &&
4904                             tp->link_config.duplex == current_duplex) {
4905                                 current_link_up = true;
4906                         }
4907                 }
4908
4909                 if (current_link_up &&
4910                     tp->link_config.active_duplex == DUPLEX_FULL) {
4911                         u32 reg, bit;
4912
4913                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4914                                 reg = MII_TG3_FET_GEN_STAT;
4915                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4916                         } else {
4917                                 reg = MII_TG3_EXT_STAT;
4918                                 bit = MII_TG3_EXT_STAT_MDIX;
4919                         }
4920
4921                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4922                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4923
4924                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4925                 }
4926         }
4927
4928 relink:
4929         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4930                 tg3_phy_copper_begin(tp);
4931
4932                 if (tg3_flag(tp, ROBOSWITCH)) {
4933                         current_link_up = true;
4934                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4935                         current_speed = SPEED_1000;
4936                         current_duplex = DUPLEX_FULL;
4937                         tp->link_config.active_speed = current_speed;
4938                         tp->link_config.active_duplex = current_duplex;
4939                 }
4940
4941                 tg3_readphy(tp, MII_BMSR, &bmsr);
4942                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4943                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4944                         current_link_up = true;
4945         }
4946
4947         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4948         if (current_link_up) {
4949                 if (tp->link_config.active_speed == SPEED_100 ||
4950                     tp->link_config.active_speed == SPEED_10)
4951                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4952                 else
4953                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4954         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4955                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4956         else
4957                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4958
4959         /* In order for the 5750 core in BCM4785 chip to work properly
4960          * in RGMII mode, the Led Control Register must be set up.
4961          */
4962         if (tg3_flag(tp, RGMII_MODE)) {
4963                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4964                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4965
4966                 if (tp->link_config.active_speed == SPEED_10)
4967                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4968                 else if (tp->link_config.active_speed == SPEED_100)
4969                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4970                                      LED_CTRL_100MBPS_ON);
4971                 else if (tp->link_config.active_speed == SPEED_1000)
4972                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4973                                      LED_CTRL_1000MBPS_ON);
4974
4975                 tw32(MAC_LED_CTRL, led_ctrl);
4976                 udelay(40);
4977         }
4978
4979         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4980         if (tp->link_config.active_duplex == DUPLEX_HALF)
4981                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4982
4983         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4984                 if (current_link_up &&
4985                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4986                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4987                 else
4988                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4989         }
4990
4991         /* ??? Without this setting Netgear GA302T PHY does not
4992          * ??? send/receive packets...
4993          */
4994         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4995             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4996                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4997                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4998                 udelay(80);
4999         }
5000
5001         tw32_f(MAC_MODE, tp->mac_mode);
5002         udelay(40);
5003
5004         tg3_phy_eee_adjust(tp, current_link_up);
5005
5006         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5007                 /* Polled via timer. */
5008                 tw32_f(MAC_EVENT, 0);
5009         } else {
5010                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5011         }
5012         udelay(40);
5013
5014         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5015             current_link_up &&
5016             tp->link_config.active_speed == SPEED_1000 &&
5017             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5018                 udelay(120);
5019                 tw32_f(MAC_STATUS,
5020                      (MAC_STATUS_SYNC_CHANGED |
5021                       MAC_STATUS_CFG_CHANGED));
5022                 udelay(40);
5023                 tg3_write_mem(tp,
5024                               NIC_SRAM_FIRMWARE_MBOX,
5025                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5026         }
5027
5028         /* Prevent send BD corruption. */
5029         if (tg3_flag(tp, CLKREQ_BUG)) {
5030                 if (tp->link_config.active_speed == SPEED_100 ||
5031                     tp->link_config.active_speed == SPEED_10)
5032                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5033                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5034                 else
5035                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5036                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5037         }
5038
5039         tg3_test_and_report_link_chg(tp, current_link_up);
5040
5041         return 0;
5042 }
5043
/* State block for the software 1000BASE-X auto-negotiation state
 * machine implemented by tg3_fiber_aneg_smachine().  The flag names
 * largely mirror the management variables of IEEE 802.3 clause 37
 * (MR_AN_ENABLE, MR_PAGE_RX, ...) -- see that clause for the protocol
 * this machine implements.
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        /* Control/status flags; the MR_LP_ADV_* bits record what the
         * link partner advertised in the received config word.
         */
        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (one tick per
         * tg3_fiber_aneg_smachine() call).
         */
        unsigned long link_time, cur_time;

        /* Consecutive-match detector for the received config word:
         * ability_match is set once the same non-matching-state word
         * has been seen on more than one consecutive tick.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        /* Last transmitted / received config words as programmed into
         * MAC_TX_AUTO_NEG / read from MAC_RX_AUTO_NEG; ANEG_CFG_* name
         * the individual bits.  NOTE(review): the bit positions (PS1 at
         * 0x8000 vs PS2 at 0x0001) follow the hardware register layout,
         * not the clause 37 wire ordering -- confirm against chip docs
         * before rearranging.
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine() */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle threshold in state-machine ticks; fiber_autoneg() ticks about
 * once per microsecond, so this is roughly 10 ms.
 */
#define ANEG_STATE_SETTLE_TIME  10000
5107
/* Advance the software fiber auto-negotiation state machine by one
 * tick.  Each call samples the config word currently being received by
 * the MAC, steps the clause-37-style state machine in @ap, and
 * programs MAC_TX_AUTO_NEG / MAC_MODE as required by the new state.
 *
 * Returns ANEG_OK (keep ticking), ANEG_TIMER_ENAB (keep ticking, a
 * settle timer is running), ANEG_DONE (negotiation finished -- result
 * is in ap->flags) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First call after a (re)start: clear all matching state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the incoming config word and update the ability/ack/
         * idle match detectors.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        /* Same word seen on consecutive ticks: treat it
                         * as a stable ability advertisement.
                         */
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word on the wire: partner is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Send an all-zero config word while the restart timer
                 * runs.
                 */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Echo the partner's ability with the ACK bit set. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* ACK only counts if the advertised abilities
                         * did not change underneath us.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Latch the link partner's advertisement into flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* NOTE(review): unnamed bit 0x0008 -- appears to be the
                 * next-page Toggle bit; confirm against chip docs.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is not implemented
                                 * (see the NEXT_PAGE_WAIT states), so
                                 * fail unless neither side wants one.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words; wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
5359
5360 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5361 {
5362         int res = 0;
5363         struct tg3_fiber_aneginfo aninfo;
5364         int status = ANEG_FAILED;
5365         unsigned int tick;
5366         u32 tmp;
5367
5368         tw32_f(MAC_TX_AUTO_NEG, 0);
5369
5370         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5371         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5372         udelay(40);
5373
5374         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5375         udelay(40);
5376
5377         memset(&aninfo, 0, sizeof(aninfo));
5378         aninfo.flags |= MR_AN_ENABLE;
5379         aninfo.state = ANEG_STATE_UNKNOWN;
5380         aninfo.cur_time = 0;
5381         tick = 0;
5382         while (++tick < 195000) {
5383                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5384                 if (status == ANEG_DONE || status == ANEG_FAILED)
5385                         break;
5386
5387                 udelay(1);
5388         }
5389
5390         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5391         tw32_f(MAC_MODE, tp->mac_mode);
5392         udelay(40);
5393
5394         *txflags = aninfo.txconfig;
5395         *rxflags = aninfo.flags;
5396
5397         if (status == ANEG_DONE &&
5398             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5399                              MR_LP_ADV_FULL_DUPLEX)))
5400                 res = 1;
5401
5402         return res;
5403 }
5404
/* Initialization sequence for the BCM8002 SerDes PHY: set the PLL lock
 * range, soft-reset the PHY, then program auto-lock/comdet and toggle
 * POR through vendor-specific registers.  The raw register numbers and
 * values come from Broadcom; they are not publicly documented.
 *
 * The statement order and delays are part of the hardware contract --
 * do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
5454
/* Bring up the fiber link using the hardware (SG_DIG) auto-negotiation
 * engine.  Handles three cases: forced mode (autoneg disabled), a
 * fresh autoneg start when SG_DIG_CTRL does not yet match what we
 * want, and result processing once autoneg has been running.  Also
 * implements link parallel detection when the partner never completes
 * autoneg.
 *
 * @mac_status: MAC_STATUS snapshot taken by the caller.
 *
 * Returns true if the link should be considered up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        bool current_link_up;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = false;

        /* SerDes-config workaround applies to everything except the
         * 5704 A0/A1 steppings.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: shut down HW autoneg if it was running,
                 * then report link up on PCS sync alone.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* NOTE(review): magic SerDes values from
                                 * Broadcom, per-port; not documented here.
                                 */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = true;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Don't restart autoneg while a parallel-detected link
                 * is still being held up (serdes_counter running).
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = true;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse soft reset while writing the new control word. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: derive flow control from
                         * local and partner pause advertisement.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = true;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: fall back to
                                 * parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = true;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: restart the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
5599
/* Bring up the fiber link without the hardware autoneg engine: either
 * run the software autoneg state machine (fiber_autoneg) or force a
 * 1000FD link.  Requires PCS sync to even try.
 *
 * @mac_status: MAC_STATUS snapshot taken by the caller.
 *
 * Returns true if the link should be considered up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        bool current_link_up = false;

        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        /* Translate the negotiated config bits into MII
                         * pause advertisements for flow control setup.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = true;
                }
                /* Clear latched sync/config-change status until it
                 * stays clear (bounded retry).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Even if autoneg failed, accept the link when we have
                 * PCS sync and the partner is not sending config words.
                 */
                mac_status = tr32(MAC_STATUS);
                if (!current_link_up &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = true;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = true;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
5664
/* Bring up / re-validate the link on a TBI (ten-bit interface) fiber PHY.
 *
 * @tp:          device private data (callers hold the required locks)
 * @force_reset: unused in this path; kept for signature parity with the
 *               other tg3_setup_*_phy() variants
 *
 * The previous flow-control/speed/duplex settings are saved up front so a
 * link report is emitted only when something actually changed.  Always
 * returns 0; link state is published via tg3_test_and_report_link_chg().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg already completed, link is up, and
	 * MAC_STATUS shows PCS sync + signal detect with no pending config
	 * words or change bits.  Just ack the change bits and keep the
	 * current link state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode before (re)starting negotiation. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the host status block so the
	 * next interrupt reflects the state established above.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG-changed until the bits stay clear (bounded retries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to prod the link partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		/* TBI fiber links only run at 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even when the up/down state did not flip, report a change in
	 * flow control, speed or duplex.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5767
5768 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5769 {
5770         int err = 0;
5771         u32 bmsr, bmcr;
5772         u16 current_speed = SPEED_UNKNOWN;
5773         u8 current_duplex = DUPLEX_UNKNOWN;
5774         bool current_link_up = false;
5775         u32 local_adv, remote_adv, sgsr;
5776
5777         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5778              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5779              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5780              (sgsr & SERDES_TG3_SGMII_MODE)) {
5781
5782                 if (force_reset)
5783                         tg3_phy_reset(tp);
5784
5785                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5786
5787                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5788                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5789                 } else {
5790                         current_link_up = true;
5791                         if (sgsr & SERDES_TG3_SPEED_1000) {
5792                                 current_speed = SPEED_1000;
5793                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5795                                 current_speed = SPEED_100;
5796                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5797                         } else {
5798                                 current_speed = SPEED_10;
5799                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5800                         }
5801
5802                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5803                                 current_duplex = DUPLEX_FULL;
5804                         else
5805                                 current_duplex = DUPLEX_HALF;
5806                 }
5807
5808                 tw32_f(MAC_MODE, tp->mac_mode);
5809                 udelay(40);
5810
5811                 tg3_clear_mac_status(tp);
5812
5813                 goto fiber_setup_done;
5814         }
5815
5816         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5817         tw32_f(MAC_MODE, tp->mac_mode);
5818         udelay(40);
5819
5820         tg3_clear_mac_status(tp);
5821
5822         if (force_reset)
5823                 tg3_phy_reset(tp);
5824
5825         tp->link_config.rmt_adv = 0;
5826
5827         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5828         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5829         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5830                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5831                         bmsr |= BMSR_LSTATUS;
5832                 else
5833                         bmsr &= ~BMSR_LSTATUS;
5834         }
5835
5836         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5837
5838         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5839             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5840                 /* do nothing, just check for link up at the end */
5841         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5842                 u32 adv, newadv;
5843
5844                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5845                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5846                                  ADVERTISE_1000XPAUSE |
5847                                  ADVERTISE_1000XPSE_ASYM |
5848                                  ADVERTISE_SLCT);
5849
5850                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5851                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5852
5853                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5854                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5855                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5856                         tg3_writephy(tp, MII_BMCR, bmcr);
5857
5858                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5859                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5860                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5861
5862                         return err;
5863                 }
5864         } else {
5865                 u32 new_bmcr;
5866
5867                 bmcr &= ~BMCR_SPEED1000;
5868                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5869
5870                 if (tp->link_config.duplex == DUPLEX_FULL)
5871                         new_bmcr |= BMCR_FULLDPLX;
5872
5873                 if (new_bmcr != bmcr) {
5874                         /* BMCR_SPEED1000 is a reserved bit that needs
5875                          * to be set on write.
5876                          */
5877                         new_bmcr |= BMCR_SPEED1000;
5878
5879                         /* Force a linkdown */
5880                         if (tp->link_up) {
5881                                 u32 adv;
5882
5883                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5884                                 adv &= ~(ADVERTISE_1000XFULL |
5885                                          ADVERTISE_1000XHALF |
5886                                          ADVERTISE_SLCT);
5887                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5888                                 tg3_writephy(tp, MII_BMCR, bmcr |
5889                                                            BMCR_ANRESTART |
5890                                                            BMCR_ANENABLE);
5891                                 udelay(10);
5892                                 tg3_carrier_off(tp);
5893                         }
5894                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5895                         bmcr = new_bmcr;
5896                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5897                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5898                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5899                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5900                                         bmsr |= BMSR_LSTATUS;
5901                                 else
5902                                         bmsr &= ~BMSR_LSTATUS;
5903                         }
5904                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5905                 }
5906         }
5907
5908         if (bmsr & BMSR_LSTATUS) {
5909                 current_speed = SPEED_1000;
5910                 current_link_up = true;
5911                 if (bmcr & BMCR_FULLDPLX)
5912                         current_duplex = DUPLEX_FULL;
5913                 else
5914                         current_duplex = DUPLEX_HALF;
5915
5916                 local_adv = 0;
5917                 remote_adv = 0;
5918
5919                 if (bmcr & BMCR_ANENABLE) {
5920                         u32 common;
5921
5922                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5923                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5924                         common = local_adv & remote_adv;
5925                         if (common & (ADVERTISE_1000XHALF |
5926                                       ADVERTISE_1000XFULL)) {
5927                                 if (common & ADVERTISE_1000XFULL)
5928                                         current_duplex = DUPLEX_FULL;
5929                                 else
5930                                         current_duplex = DUPLEX_HALF;
5931
5932                                 tp->link_config.rmt_adv =
5933                                            mii_adv_to_ethtool_adv_x(remote_adv);
5934                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5935                                 /* Link is up via parallel detect */
5936                         } else {
5937                                 current_link_up = false;
5938                         }
5939                 }
5940         }
5941
5942 fiber_setup_done:
5943         if (current_link_up && current_duplex == DUPLEX_FULL)
5944                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5945
5946         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5947         if (tp->link_config.active_duplex == DUPLEX_HALF)
5948                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5949
5950         tw32_f(MAC_MODE, tp->mac_mode);
5951         udelay(40);
5952
5953         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5954
5955         tp->link_config.active_speed = current_speed;
5956         tp->link_config.active_duplex = current_duplex;
5957
5958         tg3_test_and_report_link_chg(tp, current_link_up);
5959         return err;
5960 }
5961
/* Periodic-poll helper for MII serdes parts: if autoneg timed out, detect
 * link partners that do not send config code words ("parallel detect")
 * and force the link up; if config words reappear later, drop back to
 * autoneg.  tp->serdes_counter acts as the autoneg grace-period timer.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice — presumably to clear latched status
			 * before sampling; mirrors the double BMSR reads
			 * elsewhere in this file.  TODO confirm.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6021
6022 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6023 {
6024         u32 val;
6025         int err;
6026
6027         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6028                 err = tg3_setup_fiber_phy(tp, force_reset);
6029         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6030                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6031         else
6032                 err = tg3_setup_copper_phy(tp, force_reset);
6033
6034         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6035                 u32 scale;
6036
6037                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6038                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6039                         scale = 65;
6040                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6041                         scale = 6;
6042                 else
6043                         scale = 12;
6044
6045                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6046                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6047                 tw32(GRC_MISC_CFG, val);
6048         }
6049
6050         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6051               (6 << TX_LENGTHS_IPG_SHIFT);
6052         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6053             tg3_asic_rev(tp) == ASIC_REV_5762)
6054                 val |= tr32(MAC_TX_LENGTHS) &
6055                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6056                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6057
6058         if (tp->link_config.active_speed == SPEED_1000 &&
6059             tp->link_config.active_duplex == DUPLEX_HALF)
6060                 tw32(MAC_TX_LENGTHS, val |
6061                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6062         else
6063                 tw32(MAC_TX_LENGTHS, val |
6064                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6065
6066         if (!tg3_flag(tp, 5705_PLUS)) {
6067                 if (tp->link_up) {
6068                         tw32(HOSTCC_STAT_COAL_TICKS,
6069                              tp->coal.stats_block_coalesce_usecs);
6070                 } else {
6071                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6072                 }
6073         }
6074
6075         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6076                 val = tr32(PCIE_PWR_MGMT_THRESH);
6077                 if (!tp->link_up)
6078                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6079                               tp->pwrmgmt_thresh;
6080                 else
6081                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6082                 tw32(PCIE_PWR_MGMT_THRESH, val);
6083         }
6084
6085         return err;
6086 }
6087
6088 /* tp->lock must be held */
6089 static u64 tg3_refclk_read(struct tg3 *tp)
6090 {
6091         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6092         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6093 }
6094
6095 /* tp->lock must be held */
6096 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6097 {
6098         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6099         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6100         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6101         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6102 }
6103
6104 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6105 static inline void tg3_full_unlock(struct tg3 *tp);
6106 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6107 {
6108         struct tg3 *tp = netdev_priv(dev);
6109
6110         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6111                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6112                                 SOF_TIMESTAMPING_SOFTWARE;
6113
6114         if (tg3_flag(tp, PTP_CAPABLE)) {
6115                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6116                                         SOF_TIMESTAMPING_RX_HARDWARE |
6117                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6118         }
6119
6120         if (tp->ptp_clock)
6121                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6122         else
6123                 info->phc_index = -1;
6124
6125         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6126
6127         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6128                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6129                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6130                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6131         return 0;
6132 }
6133
6134 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6135 {
6136         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6137         bool neg_adj = false;
6138         u32 correction = 0;
6139
6140         if (ppb < 0) {
6141                 neg_adj = true;
6142                 ppb = -ppb;
6143         }
6144
6145         /* Frequency adjustment is performed using hardware with a 24 bit
6146          * accumulator and a programmable correction value. On each clk, the
6147          * correction value gets added to the accumulator and when it
6148          * overflows, the time counter is incremented/decremented.
6149          *
6150          * So conversion from ppb to correction value is
6151          *              ppb * (1 << 24) / 1000000000
6152          */
6153         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6154                      TG3_EAV_REF_CLK_CORRECT_MASK;
6155
6156         tg3_full_lock(tp, 0);
6157
6158         if (correction)
6159                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6160                      TG3_EAV_REF_CLK_CORRECT_EN |
6161                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6162         else
6163                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6164
6165         tg3_full_unlock(tp);
6166
6167         return 0;
6168 }
6169
/* ptp_clock_info::adjtime - shift the clock by @delta nanoseconds.
 * Implemented purely in software: the offset accumulates in
 * tp->ptp_adjust and is applied on reads (see tg3_ptp_gettime) rather
 * than being written to the hardware counter.  Always returns 0.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6180
6181 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6182 {
6183         u64 ns;
6184         u32 remainder;
6185         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186
6187         tg3_full_lock(tp, 0);
6188         ns = tg3_refclk_read(tp);
6189         ns += tp->ptp_adjust;
6190         tg3_full_unlock(tp);
6191
6192         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6193         ts->tv_nsec = remainder;
6194
6195         return 0;
6196 }
6197
6198 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6199                            const struct timespec *ts)
6200 {
6201         u64 ns;
6202         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6203
6204         ns = timespec_to_ns(ts);
6205
6206         tg3_full_lock(tp, 0);
6207         tg3_refclk_write(tp, ns);
6208         tp->ptp_adjust = 0;
6209         tg3_full_unlock(tp);
6210
6211         return 0;
6212 }
6213
/* ptp_clock_info::enable - ancillary clock features (alarms, external
 * timestamps, periodic outputs, PPS) are not supported by this driver.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
{
        return -EOPNOTSUPP;
}
6219
/* PTP clock capabilities and operations table, copied into tp->ptp_info
 * by tg3_ptp_init().
 */
static const struct ptp_clock_info tg3_ptp_caps = {
        .owner          = THIS_MODULE,
        .name           = "tg3 clock",
        .max_adj        = 250000000,    /* max freq adjustment (ppb, per ptp_clock_info) */
        .n_alarm        = 0,            /* no ancillary features - see tg3_ptp_enable() */
        .n_ext_ts       = 0,
        .n_per_out      = 0,
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
        .gettime        = tg3_ptp_gettime,
        .settime        = tg3_ptp_settime,
        .enable         = tg3_ptp_enable,
};
6234
6235 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6236                                      struct skb_shared_hwtstamps *timestamp)
6237 {
6238         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6239         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6240                                            tp->ptp_adjust);
6241 }
6242
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
        /* Nothing to set up on chips without the PTP hardware clock. */
        if (!tg3_flag(tp, PTP_CAPABLE))
                return;

        /* Initialize the hardware clock to the system time. */
        tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
        tp->ptp_adjust = 0;
        tp->ptp_info = tg3_ptp_caps;    /* install the clock ops table */
}
6254
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
        if (!tg3_flag(tp, PTP_CAPABLE))
                return;

        /* Reload the hardware clock from system time, preserving any
         * previously accumulated adjtime offset, then clear the offset.
         */
        tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
        tp->ptp_adjust = 0;
}
6264
/* Unregister the PTP clock (if one was registered) and reset PTP state. */
static void tg3_ptp_fini(struct tg3 *tp)
{
        if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
                return;

        ptp_clock_unregister(tp->ptp_clock);
        tp->ptp_clock = NULL;
        tp->ptp_adjust = 0;
}
6274
/* Return the irq_sync flag; nonzero tells interrupt/poll paths to back
 * off (the flag itself is managed elsewhere in this file).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
6279
6280 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6281 {
6282         int i;
6283
6284         dst = (u32 *)((u8 *)dst + off);
6285         for (i = 0; i < len; i += sizeof(u32))
6286                 *dst++ = tr32(off + i);
6287 }
6288
/* Dump the legacy (non-PCI-Express) register blocks into @regs.  Each
 * block lands at the same offset it occupies in the device register
 * space (see tg3_rd32_loop), so the tg3_dump_state() printout lines up
 * with hardware offsets.  Optional blocks are gated on device flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
        tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
        tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
        tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
        tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
        tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
        tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
        tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
        tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
        tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
        tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
        tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
        tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
        tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

        /* Per-vector coalescing registers exist only with MSI-X support. */
        if (tg3_flag(tp, SUPPORT_MSIX))
                tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

        tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
        tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
        tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
        tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
        tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

        /* The TX CPU block is absent on 5705-plus parts. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
                tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
        }

        tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
        tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
        tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
        tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
        tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

        if (tg3_flag(tp, NVRAM))
                tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6338
/* Dump device registers and per-vector status-block/NAPI state to the
 * kernel log for debugging.  Uses GFP_ATOMIC — presumably callable from
 * atomic context (e.g. error paths); verify against callers.
 */
static void tg3_dump_state(struct tg3 *tp)
{
        int i;
        u32 *regs;

        regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
        if (!regs)
                return;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Read up to but not including private PCI registers */
                for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
                        regs[i / sizeof(u32)] = tr32(i);
        } else
                tg3_dump_legacy_regs(tp, regs);

        /* Print four registers per line; skip all-zero groups to keep
         * the log output compact.
         */
        for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
                if (!regs[i + 0] && !regs[i + 1] &&
                    !regs[i + 2] && !regs[i + 3])
                        continue;

                netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
                           i * 4,
                           regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
        }

        kfree(regs);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                /* SW status block */
                netdev_err(tp->dev,
                         "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
                           i,
                           tnapi->hw_status->status,
                           tnapi->hw_status->status_tag,
                           tnapi->hw_status->rx_jumbo_consumer,
                           tnapi->hw_status->rx_consumer,
                           tnapi->hw_status->rx_mini_consumer,
                           tnapi->hw_status->idx[0].rx_producer,
                           tnapi->hw_status->idx[0].tx_consumer);

                netdev_err(tp->dev,
                "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
                           i,
                           tnapi->last_tag, tnapi->last_irq_tag,
                           tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
                           tnapi->rx_rcb_ptr,
                           tnapi->prodring.rx_std_prod_idx,
                           tnapi->prodring.rx_std_cons_idx,
                           tnapi->prodring.rx_jmb_prod_idx,
                           tnapi->prodring.rx_jmb_cons_idx);
        }
}
6394
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* NOTE(review): BUG if recovery was already flagged, or if the
         * indirect mailbox write method is in use — presumably those
         * states should never reach this path; verify against the
         * MBOX_WRITE_REORDER handling elsewhere in the file.
         */
        BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        netdev_warn(tp->dev,
                    "The system may be re-ordering memory-mapped I/O "
                    "cycles to the network device, attempting to recover. "
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");

        /* Actual reset happens later, once the workqueue sees this flag. */
        tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6414
/* Number of free tx descriptors on @tnapi's ring: the pending budget
 * minus the in-flight count; the (prod - cons) difference is masked so
 * the arithmetic stays correct across ring-index wrap-around.
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
        /* Tell compiler to fetch tx indices from memory. */
        barrier();
        return tnapi->tx_pending -
               ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
6422
6423 /* Tigon3 never reports partial packet sends.  So we do not
6424  * need special logic to handle SKBs that have not had all
6425  * of their frags sent yet, like SunGEM does.
6426  */
6427 static void tg3_tx(struct tg3_napi *tnapi)
6428 {
6429         struct tg3 *tp = tnapi->tp;
6430         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6431         u32 sw_idx = tnapi->tx_cons;
6432         struct netdev_queue *txq;
6433         int index = tnapi - tp->napi;
6434         unsigned int pkts_compl = 0, bytes_compl = 0;
6435
             /* With TSS the tx-capable napi contexts are offset by one
              * from the netdev tx queue numbering.
              */
6436         if (tg3_flag(tp, ENABLE_TSS))
6437                 index--;
6438
6439         txq = netdev_get_tx_queue(tp->dev, index);
6440
             /* Reclaim every descriptor the hardware reports consumed. */
6441         while (sw_idx != hw_idx) {
6442                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6443                 struct sk_buff *skb = ri->skb;
6444                 int i, tx_bug = 0;
6445
                     /* A completed slot with no skb means our bookkeeping
                      * disagrees with the hardware; bail out and recover.
                      */
6446                 if (unlikely(skb == NULL)) {
6447                         tg3_tx_recover(tp);
6448                         return;
6449                 }
6450
                     /* Deliver the hardware tx timestamp if this descriptor
                      * requested one.
                      */
6451                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6452                         struct skb_shared_hwtstamps timestamp;
6453                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6454                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6455
6456                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6457
6458                         skb_tstamp_tx(skb, &timestamp);
6459                 }
6460
                     /* Unmap the linear (head) portion of the skb. */
6461                 pci_unmap_single(tp->pdev,
6462                                  dma_unmap_addr(ri, mapping),
6463                                  skb_headlen(skb),
6464                                  PCI_DMA_TODEVICE);
6465
6466                 ri->skb = NULL;
6467
                     /* Skip any extra descriptor slots that were consumed
                      * when this buffer had to be split across several BDs.
                      */
6468                 while (ri->fragmented) {
6469                         ri->fragmented = false;
6470                         sw_idx = NEXT_TX(sw_idx);
6471                         ri = &tnapi->tx_buffers[sw_idx];
6472                 }
6473
6474                 sw_idx = NEXT_TX(sw_idx);
6475
                     /* Unmap each page fragment.  An occupied slot or an
                      * unexpected ring underrun indicates corrupted state
                      * (likely MMIO reordering) -- flag it for recovery.
                      */
6476                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6477                         ri = &tnapi->tx_buffers[sw_idx];
6478                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6479                                 tx_bug = 1;
6480
6481                         pci_unmap_page(tp->pdev,
6482                                        dma_unmap_addr(ri, mapping),
6483                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6484                                        PCI_DMA_TODEVICE);
6485
6486                         while (ri->fragmented) {
6487                                 ri->fragmented = false;
6488                                 sw_idx = NEXT_TX(sw_idx);
6489                                 ri = &tnapi->tx_buffers[sw_idx];
6490                         }
6491
6492                         sw_idx = NEXT_TX(sw_idx);
6493                 }
6494
                     /* Account the packet for byte-queue-limits before
                      * freeing it.
                      */
6495                 pkts_compl++;
6496                 bytes_compl += skb->len;
6497
6498                 dev_kfree_skb(skb);
6499
6500                 if (unlikely(tx_bug)) {
6501                         tg3_tx_recover(tp);
6502                         return;
6503                 }
6504         }
6505
6506         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6507
6508         tnapi->tx_cons = sw_idx;
6509
6510         /* Need to make the tx_cons update visible to tg3_start_xmit()
6511          * before checking for netif_queue_stopped().  Without the
6512          * memory barrier, there is a small possibility that tg3_start_xmit()
6513          * will miss it and cause the queue to be stopped forever.
6514          */
6515         smp_mb();
6516
6517         if (unlikely(netif_tx_queue_stopped(txq) &&
6518                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                     /* Re-check under the tx lock to avoid racing with a
                      * concurrent tg3_start_xmit() stopping the queue.
                      */
6519                 __netif_tx_lock(txq, smp_processor_id());
6520                 if (netif_tx_queue_stopped(txq) &&
6521                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6522                         netif_tx_wake_queue(txq);
6523                 __netif_tx_unlock(txq);
6524         }
6525 }
6526
6527 static void tg3_frag_free(bool is_frag, void *data)
6528 {
6529         if (is_frag)
6530                 put_page(virt_to_head_page(data));
6531         else
6532                 kfree(data);
6533 }
6534
6535 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6536 {
6537         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6538                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6539
6540         if (!ri->data)
6541                 return;
6542
6543         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6544                          map_sz, PCI_DMA_FROMDEVICE);
6545         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6546         ri->data = NULL;
6547 }
6548
6549
6550 /* Returns size of skb allocated or < 0 on error.
6551  *
6552  * We only need to fill in the address because the other members
6553  * of the RX descriptor are invariant, see tg3_init_rings.
6554  *
6555  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6556  * posting buffers we only dirty the first cache line of the RX
6557  * descriptor (containing the address).  Whereas for the RX status
6558  * buffers the cpu only reads the last cacheline of the RX descriptor
6559  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6560  */
6561 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6562                              u32 opaque_key, u32 dest_idx_unmasked,
6563                              unsigned int *frag_size)
6564 {
6565         struct tg3_rx_buffer_desc *desc;
6566         struct ring_info *map;
6567         u8 *data;
6568         dma_addr_t mapping;
6569         int skb_size, data_size, dest_idx;
6570
             /* Select the descriptor, ring_info slot and DMA buffer size
              * for the ring identified by the opaque cookie.
              */
6571         switch (opaque_key) {
6572         case RXD_OPAQUE_RING_STD:
6573                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6574                 desc = &tpr->rx_std[dest_idx];
6575                 map = &tpr->rx_std_buffers[dest_idx];
6576                 data_size = tp->rx_pkt_map_sz;
6577                 break;
6578
6579         case RXD_OPAQUE_RING_JUMBO:
6580                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6581                 desc = &tpr->rx_jmb[dest_idx].std;
6582                 map = &tpr->rx_jmb_buffers[dest_idx];
6583                 data_size = TG3_RX_JMB_MAP_SZ;
6584                 break;
6585
6586         default:
6587                 return -EINVAL;
6588         }
6589
6590         /* Do not overwrite any of the map or rp information
6591          * until we are sure we can commit to a new buffer.
6592          *
6593          * Callers depend upon this behavior and assume that
6594          * we leave everything unchanged if we fail.
6595          */
             /* Total allocation also covers the rx headroom and the
              * skb_shared_info tail needed when the buffer is later
              * handed to build_skb() (see tg3_rx()).
              */
6596         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6597                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
             /* Buffers that fit in a page come from the page-fragment
              * allocator; larger ones fall back to kmalloc().  *frag_size
              * records which allocator was used (0 means kmalloc).
              */
6598         if (skb_size <= PAGE_SIZE) {
6599                 data = netdev_alloc_frag(skb_size);
6600                 *frag_size = skb_size;
6601         } else {
6602                 data = kmalloc(skb_size, GFP_ATOMIC);
6603                 *frag_size = 0;
6604         }
6605         if (!data)
6606                 return -ENOMEM;
6607
6608         mapping = pci_map_single(tp->pdev,
6609                                  data + TG3_RX_OFFSET(tp),
6610                                  data_size,
6611                                  PCI_DMA_FROMDEVICE);
             /* On mapping failure, free with the matching allocator and
              * leave the ring slot untouched per the contract above.
              */
6612         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6613                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6614                 return -EIO;
6615         }
6616
             /* Commit: publish the buffer and its DMA address to the BD. */
6617         map->data = data;
6618         dma_unmap_addr_set(map, mapping, mapping);
6619
6620         desc->addr_hi = ((u64)mapping >> 32);
6621         desc->addr_lo = ((u64)mapping & 0xffffffff);
6622
6623         return data_size;
6624 }
6625
6626 /* We only need to move over in the address because the other
6627  * members of the RX descriptor are invariant.  See notes above
6628  * tg3_alloc_rx_data for full details.
6629  */
6630 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6631                            struct tg3_rx_prodring_set *dpr,
6632                            u32 opaque_key, int src_idx,
6633                            u32 dest_idx_unmasked)
6634 {
6635         struct tg3 *tp = tnapi->tp;
6636         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6637         struct ring_info *src_map, *dest_map;
             /* Buffers are always recycled out of napi[0]'s producer set. */
6638         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6639         int dest_idx;
6640
             /* Locate source and destination slots in the std or jumbo
              * ring, as identified by the opaque cookie.
              */
6641         switch (opaque_key) {
6642         case RXD_OPAQUE_RING_STD:
6643                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6644                 dest_desc = &dpr->rx_std[dest_idx];
6645                 dest_map = &dpr->rx_std_buffers[dest_idx];
6646                 src_desc = &spr->rx_std[src_idx];
6647                 src_map = &spr->rx_std_buffers[src_idx];
6648                 break;
6649
6650         case RXD_OPAQUE_RING_JUMBO:
6651                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6652                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6653                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6654                 src_desc = &spr->rx_jmb[src_idx].std;
6655                 src_map = &spr->rx_jmb_buffers[src_idx];
6656                 break;
6657
6658         default:
6659                 return;
6660         }
6661
             /* Transfer the buffer pointer, DMA cookie and BD address to
              * the destination slot.
              */
6662         dest_map->data = src_map->data;
6663         dma_unmap_addr_set(dest_map, mapping,
6664                            dma_unmap_addr(src_map, mapping));
6665         dest_desc->addr_hi = src_desc->addr_hi;
6666         dest_desc->addr_lo = src_desc->addr_lo;
6667
6668         /* Ensure that the update to the skb happens after the physical
6669          * addresses have been transferred to the new BD location.
6670          */
6671         smp_wmb();
6672
6673         src_map->data = NULL;
6674 }
6675
6676 /* The RX ring scheme is composed of multiple rings which post fresh
6677  * buffers to the chip, and one special ring the chip uses to report
6678  * status back to the host.
6679  *
6680  * The special ring reports the status of received packets to the
6681  * host.  The chip does not write into the original descriptor the
6682  * RX buffer was obtained from.  The chip simply takes the original
6683  * descriptor as provided by the host, updates the status and length
6684  * field, then writes this into the next status ring entry.
6685  *
6686  * Each ring the host uses to post buffers to the chip is described
6687  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6688  * it is first placed into the on-chip ram.  When the packet's length
6689  * is known, it walks down the TG3_BDINFO entries to select the ring.
6690  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6691  * which is within the range of the new packet's length is chosen.
6692  *
6693  * The "separate ring for rx status" scheme may sound queer, but it makes
6694  * sense from a cache coherency perspective.  If only the host writes
6695  * to the buffer post rings, and only the chip writes to the rx status
6696  * rings, then cache lines never move beyond shared-modified state.
6697  * If both the host and chip were to write into the same ring, cache line
6698  * eviction could occur since both entities want it in an exclusive state.
6699  */
6700 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6701 {
6702         struct tg3 *tp = tnapi->tp;
6703         u32 work_mask, rx_std_posted = 0;
6704         u32 std_prod_idx, jmb_prod_idx;
6705         u32 sw_idx = tnapi->rx_rcb_ptr;
6706         u16 hw_idx;
6707         int received;
6708         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6709
6710         hw_idx = *(tnapi->rx_rcb_prod_idx);
6711         /*
6712          * We need to order the read of hw_idx and the read of
6713          * the opaque cookie.
6714          */
6715         rmb();
6716         work_mask = 0;
6717         received = 0;
6718         std_prod_idx = tpr->rx_std_prod_idx;
6719         jmb_prod_idx = tpr->rx_jmb_prod_idx;
             /* Consume status ring entries up to the hardware's producer
              * index, bounded by the NAPI budget.
              */
6720         while (sw_idx != hw_idx && budget > 0) {
6721                 struct ring_info *ri;
6722                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6723                 unsigned int len;
6724                 struct sk_buff *skb;
6725                 dma_addr_t dma_addr;
6726                 u32 opaque_key, desc_idx, *post_ptr;
6727                 u8 *data;
6728                 u64 tstamp = 0;
6729
                     /* The opaque cookie encodes which producer ring
                      * (std or jumbo) the buffer was posted on, and at
                      * what index.
                      */
6730                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6731                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6732                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6733                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6734                         dma_addr = dma_unmap_addr(ri, mapping);
6735                         data = ri->data;
6736                         post_ptr = &std_prod_idx;
6737                         rx_std_posted++;
6738                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6739                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6740                         dma_addr = dma_unmap_addr(ri, mapping);
6741                         data = ri->data;
6742                         post_ptr = &jmb_prod_idx;
6743                 } else
6744                         goto next_pkt_nopost;
6745
6746                 work_mask |= opaque_key;
6747
                     /* Errored frames: recycle the buffer back onto the
                      * producer ring and count the drop.
                      */
6748                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6749                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6750                 drop_it:
6751                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6752                                        desc_idx, *post_ptr);
6753                 drop_it_no_recycle:
6754                         /* Other statistics kept track of by card. */
6755                         tp->rx_dropped++;
6756                         goto next_pkt;
6757                 }
6758
6759                 prefetch(data + TG3_RX_OFFSET(tp));
                     /* Hardware length includes the FCS; strip it here. */
6760                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6761                       ETH_FCS_LEN;
6762
                     /* Latch the 64-bit rx timestamp registers for frames
                      * the chip flagged as PTP v1/v2.
                      */
6763                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6764                      RXD_FLAG_PTPSTAT_PTPV1 ||
6765                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6766                      RXD_FLAG_PTPSTAT_PTPV2) {
6767                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6768                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6769                 }
6770
                     /* Large packets: post a replacement buffer, then hand
                      * the original data to the stack zero-copy via
                      * build_skb().
                      */
6771                 if (len > TG3_RX_COPY_THRESH(tp)) {
6772                         int skb_size;
6773                         unsigned int frag_size;
6774
6775                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6776                                                     *post_ptr, &frag_size);
6777                         if (skb_size < 0)
6778                                 goto drop_it;
6779
6780                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6781                                          PCI_DMA_FROMDEVICE);
6782
6783                         skb = build_skb(data, frag_size);
6784                         if (!skb) {
6785                                 tg3_frag_free(frag_size != 0, data);
6786                                 goto drop_it_no_recycle;
6787                         }
6788                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6789                         /* Ensure that the update to the data happens
6790                          * after the usage of the old DMA mapping.
6791                          */
6792                         smp_wmb();
6793
6794                         ri->data = NULL;
6795
                     /* Small packets: copy into a fresh skb and recycle
                      * the original buffer onto the producer ring.
                      */
6796                 } else {
6797                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6798                                        desc_idx, *post_ptr);
6799
6800                         skb = netdev_alloc_skb(tp->dev,
6801                                                len + TG3_RAW_IP_ALIGN);
6802                         if (skb == NULL)
6803                                 goto drop_it_no_recycle;
6804
6805                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6806                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6807                         memcpy(skb->data,
6808                                data + TG3_RX_OFFSET(tp),
6809                                len);
6810                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6811                 }
6812
6813                 skb_put(skb, len);
6814                 if (tstamp)
6815                         tg3_hwclock_to_timestamp(tp, tstamp,
6816                                                  skb_hwtstamps(skb));
6817
                     /* Honor the chip's checksum result only when rx
                      * checksum offload is enabled.
                      */
6818                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6819                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6820                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6821                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6822                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6823                 else
6824                         skb_checksum_none_assert(skb);
6825
6826                 skb->protocol = eth_type_trans(skb, tp->dev);
6827
                     /* Drop oversized frames; VLAN-tagged frames are
                      * exempt from this check.
                      */
6828                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6829                     skb->protocol != htons(ETH_P_8021Q)) {
6830                         dev_kfree_skb(skb);
6831                         goto drop_it_no_recycle;
6832                 }
6833
6834                 if (desc->type_flags & RXD_FLAG_VLAN &&
6835                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6836                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6837                                                desc->err_vlan & RXD_VLAN_MASK);
6838
6839                 napi_gro_receive(&tnapi->napi, skb);
6840
6841                 received++;
6842                 budget--;
6843
6844 next_pkt:
6845                 (*post_ptr)++;
6846
                     /* Periodically publish posted std buffers mid-poll so
                      * the chip does not starve during long runs.
                      */
6847                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6848                         tpr->rx_std_prod_idx = std_prod_idx &
6849                                                tp->rx_std_ring_mask;
6850                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6851                                      tpr->rx_std_prod_idx);
6852                         work_mask &= ~RXD_OPAQUE_RING_STD;
6853                         rx_std_posted = 0;
6854                 }
6855 next_pkt_nopost:
6856                 sw_idx++;
6857                 sw_idx &= tp->rx_ret_ring_mask;
6858
6859                 /* Refresh hw_idx to see if there is new work */
6860                 if (sw_idx == hw_idx) {
6861                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6862                         rmb();
6863                 }
6864         }
6865
6866         /* ACK the status ring. */
6867         tnapi->rx_rcb_ptr = sw_idx;
6868         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6869
6870         /* Refill RX ring(s). */
6871         if (!tg3_flag(tp, ENABLE_RSS)) {
6872                 /* Sync BD data before updating mailbox */
6873                 wmb();
6874
6875                 if (work_mask & RXD_OPAQUE_RING_STD) {
6876                         tpr->rx_std_prod_idx = std_prod_idx &
6877                                                tp->rx_std_ring_mask;
6878                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6879                                      tpr->rx_std_prod_idx);
6880                 }
6881                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6882                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6883                                                tp->rx_jmb_ring_mask;
6884                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6885                                      tpr->rx_jmb_prod_idx);
6886                 }
6887                 mmiowb();
6888         } else if (work_mask) {
6889                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6890                  * updated before the producer indices can be updated.
6891                  */
6892                 smp_wmb();
6893
6894                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6895                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6896
                     /* Under RSS, napi[1] redistributes buffers back to the
                      * hardware-visible producer ring (see tg3_poll_work()).
                      */
6897                 if (tnapi != &tp->napi[1]) {
6898                         tp->rx_refill = true;
6899                         napi_schedule(&tp->napi[1].napi);
6900                 }
6901         }
6902
6903         return received;
6904 }
6905
6906 static void tg3_poll_link(struct tg3 *tp)
6907 {
6908         /* handle link change and other phy events */
6909         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6910                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6911
6912                 if (sblk->status & SD_STATUS_LINK_CHG) {
                             /* Ack the link-change bit while preserving the
                              * rest of the status word.
                              */
6913                         sblk->status = SD_STATUS_UPDATED |
6914                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6915                         spin_lock(&tp->lock);
6916                         if (tg3_flag(tp, USE_PHYLIB)) {
                                     /* phylib handles the link state itself;
                                      * just clear the MAC attention bits.
                                      */
6917                                 tw32_f(MAC_STATUS,
6918                                      (MAC_STATUS_SYNC_CHANGED |
6919                                       MAC_STATUS_CFG_CHANGED |
6920                                       MAC_STATUS_MI_COMPLETION |
6921                                       MAC_STATUS_LNKSTATE_CHANGED));
6922                                 udelay(40);
6923                         } else
6924                                 tg3_setup_phy(tp, false);
6925                         spin_unlock(&tp->lock);
6926                 }
6927         }
6928 }
6929
6930 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6931                                 struct tg3_rx_prodring_set *dpr,
6932                                 struct tg3_rx_prodring_set *spr)
6933 {
6934         u32 si, di, cpycnt, src_prod_idx;
6935         int i, err = 0;
6936
             /* First pass: move completed standard-ring buffers from the
              * source (per-vector) ring into the destination ring.
              */
6937         while (1) {
6938                 src_prod_idx = spr->rx_std_prod_idx;
6939
6940                 /* Make sure updates to the rx_std_buffers[] entries and the
6941                  * standard producer index are seen in the correct order.
6942                  */
6943                 smp_rmb();
6944
6945                 if (spr->rx_std_cons_idx == src_prod_idx)
6946                         break;
6947
                     /* Number of contiguous entries available before the
                      * source ring wraps.
                      */
6948                 if (spr->rx_std_cons_idx < src_prod_idx)
6949                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6950                 else
6951                         cpycnt = tp->rx_std_ring_mask + 1 -
6952                                  spr->rx_std_cons_idx;
6953
                     /* Also clamp to the contiguous space left in the
                      * destination ring before it wraps.
                      */
6954                 cpycnt = min(cpycnt,
6955                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6956
6957                 si = spr->rx_std_cons_idx;
6958                 di = dpr->rx_std_prod_idx;
6959
                     /* Stop at the first destination slot still holding a
                      * buffer; -ENOSPC tells the caller to force another
                      * pass via an interrupt.
                      */
6960                 for (i = di; i < di + cpycnt; i++) {
6961                         if (dpr->rx_std_buffers[i].data) {
6962                                 cpycnt = i - di;
6963                                 err = -ENOSPC;
6964                                 break;
6965                         }
6966                 }
6967
6968                 if (!cpycnt)
6969                         break;
6970
6971                 /* Ensure that updates to the rx_std_buffers ring and the
6972                  * shadowed hardware producer ring from tg3_recycle_skb() are
6973                  * ordered correctly WRT the skb check above.
6974                  */
6975                 smp_rmb();
6976
6977                 memcpy(&dpr->rx_std_buffers[di],
6978                        &spr->rx_std_buffers[si],
6979                        cpycnt * sizeof(struct ring_info));
6980
                     /* Copy the BD addresses so the chip sees the moved
                      * buffers.
                      */
6981                 for (i = 0; i < cpycnt; i++, di++, si++) {
6982                         struct tg3_rx_buffer_desc *sbd, *dbd;
6983                         sbd = &spr->rx_std[si];
6984                         dbd = &dpr->rx_std[di];
6985                         dbd->addr_hi = sbd->addr_hi;
6986                         dbd->addr_lo = sbd->addr_lo;
6987                 }
6988
6989                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6990                                        tp->rx_std_ring_mask;
6991                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6992                                        tp->rx_std_ring_mask;
6993         }
6994
             /* Second pass: identical transfer for the jumbo ring. */
6995         while (1) {
6996                 src_prod_idx = spr->rx_jmb_prod_idx;
6997
6998                 /* Make sure updates to the rx_jmb_buffers[] entries and
6999                  * the jumbo producer index are seen in the correct order.
7000                  */
7001                 smp_rmb();
7002
7003                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7004                         break;
7005
7006                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7007                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7008                 else
7009                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7010                                  spr->rx_jmb_cons_idx;
7011
7012                 cpycnt = min(cpycnt,
7013                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7014
7015                 si = spr->rx_jmb_cons_idx;
7016                 di = dpr->rx_jmb_prod_idx;
7017
7018                 for (i = di; i < di + cpycnt; i++) {
7019                         if (dpr->rx_jmb_buffers[i].data) {
7020                                 cpycnt = i - di;
7021                                 err = -ENOSPC;
7022                                 break;
7023                         }
7024                 }
7025
7026                 if (!cpycnt)
7027                         break;
7028
7029                 /* Ensure that updates to the rx_jmb_buffers ring and the
7030                  * shadowed hardware producer ring from tg3_recycle_skb() are
7031                  * ordered correctly WRT the skb check above.
7032                  */
7033                 smp_rmb();
7034
7035                 memcpy(&dpr->rx_jmb_buffers[di],
7036                        &spr->rx_jmb_buffers[si],
7037                        cpycnt * sizeof(struct ring_info));
7038
7039                 for (i = 0; i < cpycnt; i++, di++, si++) {
7040                         struct tg3_rx_buffer_desc *sbd, *dbd;
7041                         sbd = &spr->rx_jmb[si].std;
7042                         dbd = &dpr->rx_jmb[di].std;
7043                         dbd->addr_hi = sbd->addr_hi;
7044                         dbd->addr_lo = sbd->addr_lo;
7045                 }
7046
7047                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7048                                        tp->rx_jmb_ring_mask;
7049                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7050                                        tp->rx_jmb_ring_mask;
7051         }
7052
7053         return err;
7054 }
7055
7056 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7057 {
7058         struct tg3 *tp = tnapi->tp;
7059
7060         /* run TX completion thread */
7061         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7062                 tg3_tx(tnapi);
                     /* tg3_tx() may have detected ring corruption and
                      * scheduled a reset; stop polling in that case.
                      */
7063                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7064                         return work_done;
7065         }
7066
             /* This vector has no rx return ring; nothing more to do. */
7067         if (!tnapi->rx_rcb_prod_idx)
7068                 return work_done;
7069
7070         /* run RX thread, within the bounds set by NAPI.
7071          * All RX "locking" is done by ensuring outside
7072          * code synchronizes with tg3->napi.poll()
7073          */
7074         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7075                 work_done += tg3_rx(tnapi, budget - work_done);
7076
             /* Under RSS, napi[1] is responsible for replenishing the
              * hardware-visible producer rings (napi[0]'s) from every
              * per-vector ring, then kicking the rx mailboxes.
              */
7077         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7078                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7079                 int i, err = 0;
7080                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7081                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7082
7083                 tp->rx_refill = false;
7084                 for (i = 1; i <= tp->rxq_cnt; i++)
7085                         err |= tg3_rx_prodring_xfer(tp, dpr,
7086                                                     &tp->napi[i].prodring);
7087
                     /* Sync BD contents before publishing the new producer
                      * indices to the chip.
                      */
7088                 wmb();
7089
7090                 if (std_prod_idx != dpr->rx_std_prod_idx)
7091                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7092                                      dpr->rx_std_prod_idx);
7093
7094                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7095                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7096                                      dpr->rx_jmb_prod_idx);
7097
7098                 mmiowb();
7099
                     /* A transfer error (destination full) is retried by
                      * forcing a coalescing-now interrupt.
                      */
7100                 if (err)
7101                         tw32_f(HOSTCC_MODE, tp->coal_now);
7102         }
7103
7104         return work_done;
7105 }
7106
7107 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7108 {
7109         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7110                 schedule_work(&tp->reset_task);
7111 }
7112
7113 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7114 {
             /* Wait for any queued or running reset task to finish first,
              * then clear the pending flags so a fresh reset can be
              * scheduled later.
              */
7115         cancel_work_sync(&tp->reset_task);
7116         tg3_flag_clear(tp, RESET_TASK_PENDING);
7117         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7118 }
7119
7120 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7121 {
7122         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7123         struct tg3 *tp = tnapi->tp;
7124         int work_done = 0;
7125         struct tg3_hw_status *sblk = tnapi->hw_status;
7126
             /* Poll until either the budget is exhausted or no new work is
              * visible, at which point interrupts are re-enabled.
              */
7127         while (1) {
7128                 work_done = tg3_poll_work(tnapi, work_done, budget);
7129
7130                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7131                         goto tx_recovery;
7132
7133                 if (unlikely(work_done >= budget))
7134                         break;
7135
7136                 /* tp->last_tag is used in tg3_int_reenable() below
7137                  * to tell the hw how much work has been processed,
7138                  * so we must read it before checking for more work.
7139                  */
7140                 tnapi->last_tag = sblk->status_tag;
7141                 tnapi->last_irq_tag = tnapi->last_tag;
7142                 rmb();
7143
7144                 /* check for RX/TX work to do */
7145                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7146                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7147
7148                         /* This test here is not race free, but will reduce
7149                          * the number of interrupts by looping again.
7150                          */
7151                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7152                                 continue;
7153
7154                         napi_complete(napi);
7155                         /* Reenable interrupts. */
7156                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7157
7158                         /* This test here is synchronized by napi_schedule()
7159                          * and napi_complete() to close the race condition.
7160                          */
7161                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
                                     /* A refill request slipped in; force a
                                      * coalescing-now interrupt to re-run.
                                      */
7162                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7163                                                   HOSTCC_MODE_ENABLE |
7164                                                   tnapi->coal_now);
7165                         }
7166                         mmiowb();
7167                         break;
7168                 }
7169         }
7170
7171         return work_done;
7172
7173 tx_recovery:
7174         /* work_done is guaranteed to be less than budget. */
7175         napi_complete(napi);
7176         tg3_reset_task_schedule(tp);
7177         return work_done;
7178 }
7179
7180 static void tg3_process_error(struct tg3 *tp)
7181 {
7182         u32 val;
7183         bool real_error = false;
7184
             /* ERROR_PROCESSED latches the first error so that state is
              * dumped and a reset scheduled only once.
              */
7185         if (tg3_flag(tp, ERROR_PROCESSED))
7186                 return;
7187
7188         /* Check Flow Attention register */
7189         val = tr32(HOSTCC_FLOW_ATTN);
7190         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7191                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7192                 real_error = true;
7193         }
7194
             /* Any MSI status bit other than the request bit is an error. */
7195         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7196                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7197                 real_error = true;
7198         }
7199
             /* Nonzero read/write DMA status indicates a DMA engine fault. */
7200         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7201                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7202                 real_error = true;
7203         }
7204
7205         if (!real_error)
7206                 return;
7207
7208         tg3_dump_state(tp);
7209
7210         tg3_flag_set(tp, ERROR_PROCESSED);
7211         tg3_reset_task_schedule(tp);
7212 }
7213
/* NAPI poll handler for the default interrupt vector.  Loops processing
 * chip errors, link events, and RX/TX completions until either the
 * budget is exhausted (stay scheduled) or no work remains (complete
 * NAPI and re-enable chip interrupts).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		/* Handle error conditions flagged in the status block. */
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A TX error was detected; bail out and let the reset
		 * task recover the device.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: remain scheduled for another pass. */
		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7261
7262 static void tg3_napi_disable(struct tg3 *tp)
7263 {
7264         int i;
7265
7266         for (i = tp->irq_cnt - 1; i >= 0; i--)
7267                 napi_disable(&tp->napi[i].napi);
7268 }
7269
7270 static void tg3_napi_enable(struct tg3 *tp)
7271 {
7272         int i;
7273
7274         for (i = 0; i < tp->irq_cnt; i++)
7275                 napi_enable(&tp->napi[i].napi);
7276 }
7277
7278 static void tg3_napi_init(struct tg3 *tp)
7279 {
7280         int i;
7281
7282         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7283         for (i = 1; i < tp->irq_cnt; i++)
7284                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7285 }
7286
7287 static void tg3_napi_fini(struct tg3 *tp)
7288 {
7289         int i;
7290
7291         for (i = 0; i < tp->irq_cnt; i++)
7292                 netif_napi_del(&tp->napi[i].napi);
7293 }
7294
/* Freeze all traffic: pretend a TX just happened so the netdev
 * watchdog does not fire while the device is deliberately stopped,
 * then quiesce NAPI, drop the carrier, and disable the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7302
/* Restart traffic after tg3_netif_stop().  tp->lock must be held. */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	/* Re-enable NAPI, force one status-block pass so events that
	 * arrived while stopped are noticed, then unmask interrupts.
	 */
	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7321
/* Tell the interrupt handlers to stand down (via tp->irq_sync) and
 * wait for any handler instance already running on another CPU to
 * finish.  Must not be called with irq_sync already set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible to the IRQ handlers before
	 * waiting for them to drain.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7334
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7346
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7351
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and the next RX completion entry. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling NAPI if the driver is quiescing interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7369
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and the next RX completion entry. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Only schedule NAPI when the driver is not quiescing irqs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7395
/* INTx interrupt handler for chips without tagged status.  Decides
 * whether the interrupt is ours, acknowledges it via the interrupt
 * mailbox, and schedules NAPI when the status block shows work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or the chip is
			 * mid-reset; report it unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7444
/* INTx interrupt handler for chips with tagged status blocks.  A
 * changed status tag means the block carries new events; the tag is
 * recorded so screaming shared interrupts can be rejected.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	/* Warm the next RX completion entry before polling starts. */
	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7496
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt when the status block was updated or the
	 * INTA line is asserted; disable further interrupts so the
	 * test observes exactly one.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7511
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every vector by invoking the INTx handler
 * directly (e.g. for netconsole) instead of waiting for hardware
 * interrupt delivery.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Skip entirely while the driver is quiescing interrupts. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7525
/* net_device watchdog callback: the stack saw no TX completion in
 * time.  Log device state (when tx_err messages are enabled) and
 * schedule a full chip reset to recover.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7537
7538 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7539 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7540 {
7541         u32 base = (u32) mapping & 0xffffffff;
7542
7543         return (base > 0xffffdcc0) && (base + len + 8 < base);
7544 }
7545
7546 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7547  * of any 4GB boundaries: 4G, 8G, etc
7548  */
7549 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7550                                            u32 len, u32 mss)
7551 {
7552         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7553                 u32 base = (u32) mapping & 0xffffffff;
7554
7555                 return ((base + len + (mss & 0x3fff)) < base);
7556         }
7557         return 0;
7558 }
7559
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips flagged with the 40-bit DMA bug need checking, and
	 * only 64-bit highmem configurations can produce such addresses.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7572
7573 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7574                                  dma_addr_t mapping, u32 len, u32 flags,
7575                                  u32 mss, u32 vlan)
7576 {
7577         txbd->addr_hi = ((u64) mapping >> 32);
7578         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7579         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7580         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7581 }
7582
/* Queue one DMA segment into the TX ring, splitting it into multiple
 * descriptors when the chip has a DMA length limit.  *entry and
 * *budget are advanced/consumed in place.  Returns true when a
 * hardware DMA bug would be triggered so the caller can fall back to
 * the copy workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Short (<=8 byte) DMA erratum on some chips. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	/* 4GB-boundary crossing erratum. */
	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	/* 5762 TSO-specific 4GB-proximity erratum. */
	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	/* Addresses above 40 bits on affected chips. */
	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		/* Chop the segment into dma_limit-sized descriptors,
		 * each without TXD_FLAG_END, while budget remains.
		 */
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final piece carries the caller's flags
				 * (including TXD_FLAG_END if set).
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors mid-segment; undo the
				 * fragmented mark on the last one written.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No DMA limit: one descriptor covers the whole segment. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7645
/* Unmap the DMA mappings of a queued skb, starting at ring index
 * 'entry' and covering the head plus fragments 0..last (pass last < 0
 * for head-only).  Entries marked 'fragmented' are the extra
 * descriptors created by tg3_tx_frag_set() and are skipped over.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Head (linear) portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip any split descriptors belonging to the head mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split descriptors for this fragment, too. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7683
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear skb (which gets a new
 * DMA mapping, hopefully avoiding the problematic address range) and
 * queues that instead.  On success *pskb points at the new skb and the
 * original is freed; returns 0 on success, -1 on allocation/mapping/
 * queueing failure (original skb is freed either way).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned; copy with extra
		 * headroom so the payload can start on that boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* If even the copy still trips a DMA bug, give up
			 * and unwind the mapping just made.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7738
7739 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7740
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the skb and transmits each resulting MTU-sized
 * packet individually through tg3_start_xmit().  The original skb is
 * always consumed (freed) unless NETDEV_TX_BUSY is returned.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst-case descriptor estimate: three per GSO segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so the stack produces plain packets. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	/* Transmit each segment through the normal xmit path. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7781
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb (head + page fragments), builds TX descriptors via
 * tg3_tx_frag_set(), applies TSO/checksum/vlan/timestamp flags, works
 * around the chip DMA errata by falling back to a linear copy when
 * needed, and finally kicks the hardware producer mailbox.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, vector 0 handles no TX ring; TX rings start at 1. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We will modify IP/TCP headers below; unshare first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a TSO erratum; fall back to
		 * software GSO segmentation.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO computes the TCP checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * per-generation format the chip expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware TX timestamp if the stack asked for one. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Queue the linear head; TXD_FLAG_END only if there are no frags. */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Firmware TSO only looks at mss on the first descriptor. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far, then retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unmap the head and fragments 0..i-1 that were mapped. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
8004
/* Enable or disable internal MAC loopback mode.
 *
 * When enabling, half duplex is cleared, the internal loopback bit is
 * set, and the port mode is forced to MII (10/100-only PHYs) or GMII.
 * The new MAC_MODE value is written to the chip with a 40us settle
 * delay in both directions.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		/* Pre-5705 chips also need the link polarity bit set. */
		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		/* Undo the polarity override where it was applied (or
		 * never wanted): 5705+, serdes PHYs, and 5700.
		 */
		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* Latch the new mode and give the MAC time to settle. */
	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
8032
/* Put the PHY (and matching MAC port mode) into loopback at @speed.
 *
 * @speed:   SPEED_10 / SPEED_100 / SPEED_1000; FET PHYs are capped at
 *           100Mb since they cannot do gigabit.
 * @extlpbk: true for external loopback (requires an external setup via
 *           tg3_phy_set_extloopbk()), false for internal BMCR loopback.
 *
 * Returns 0 on success, -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* APD and auto-MDIX interfere with loopback; turn them off. */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Force full duplex at the requested speed. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force the gigabit PHY to be link master for
			 * external loopback.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			/* FET PHYs use the PTEST trim register instead. */
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;	/* internal PHY loopback */

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Match the MAC port mode to the (possibly downgraded) speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs need opposite polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8125
/* React to a NETIF_F_LOOPBACK feature change: switch internal MAC
 * loopback on or off under tp->lock.  A no-op if the MAC is already
 * in the requested state.
 */
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		/* Report the link as up while looped back. */
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
8151
8152 static netdev_features_t tg3_fix_features(struct net_device *dev,
8153         netdev_features_t features)
8154 {
8155         struct tg3 *tp = netdev_priv(dev);
8156
8157         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8158                 features &= ~NETIF_F_ALL_TSO;
8159
8160         return features;
8161 }
8162
8163 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8164 {
8165         netdev_features_t changed = dev->features ^ features;
8166
8167         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8168                 tg3_set_loopback(dev, features);
8169
8170         return 0;
8171 }
8172
/* Free the rx data buffers held in a producer ring set.
 *
 * Secondary ring sets (anything but napi[0].prodring) only hold
 * buffers in the window between their consumer and producer indices,
 * so just that window is walked.  The primary (hw) ring set frees
 * every slot.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Walk [cons, prod) with ring-mask wraparound. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary ring set: release every standard ring entry. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* ... and every jumbo entry, where a jumbo ring exists. */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
8206
8207 /* Initialize rx rings for packet processing.
8208  *
8209  * The chip has been shut down and the driver detached from
8210  * the networking, so no interrupts or new tx packets will
8211  * end up in the driver.  tp->{tx,}lock are held and thus
8212  * we may not sleep.
8213  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary ring sets have no descriptor memory to set up;
	 * just clear their shadow buffer arrays.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips with jumbo MTU use larger std-ring buffers
	 * instead of a separate jumbo ring.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		/* On partial failure, shrink rx_pending to what we
		 * actually got; fail outright only if nothing fit.
		 */
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the jumbo descriptors. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8315
8316 static void tg3_rx_prodring_fini(struct tg3 *tp,
8317                                  struct tg3_rx_prodring_set *tpr)
8318 {
8319         kfree(tpr->rx_std_buffers);
8320         tpr->rx_std_buffers = NULL;
8321         kfree(tpr->rx_jmb_buffers);
8322         tpr->rx_jmb_buffers = NULL;
8323         if (tpr->rx_std) {
8324                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8325                                   tpr->rx_std, tpr->rx_std_mapping);
8326                 tpr->rx_std = NULL;
8327         }
8328         if (tpr->rx_jmb) {
8329                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8330                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8331                 tpr->rx_jmb = NULL;
8332         }
8333 }
8334
8335 static int tg3_rx_prodring_init(struct tg3 *tp,
8336                                 struct tg3_rx_prodring_set *tpr)
8337 {
8338         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8339                                       GFP_KERNEL);
8340         if (!tpr->rx_std_buffers)
8341                 return -ENOMEM;
8342
8343         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8344                                          TG3_RX_STD_RING_BYTES(tp),
8345                                          &tpr->rx_std_mapping,
8346                                          GFP_KERNEL);
8347         if (!tpr->rx_std)
8348                 goto err_out;
8349
8350         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8351                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8352                                               GFP_KERNEL);
8353                 if (!tpr->rx_jmb_buffers)
8354                         goto err_out;
8355
8356                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8357                                                  TG3_RX_JMB_RING_BYTES(tp),
8358                                                  &tpr->rx_jmb_mapping,
8359                                                  GFP_KERNEL);
8360                 if (!tpr->rx_jmb)
8361                         goto err_out;
8362         }
8363
8364         return 0;
8365
8366 err_out:
8367         tg3_rx_prodring_fini(tp, tpr);
8368         return -ENOMEM;
8369 }
8370
8371 /* Free up pending packets in all rx/tx rings.
8372  *
8373  * The chip has been shut down and the driver detached from
8374  * the networking, so no interrupts or new tx packets will
8375  * end up in the driver.  tp->{tx,}lock is not held and we are not
8376  * in an interrupt context and thus may sleep.
8377  */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		/* Unmap and free any tx packets still queued. */
		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* The last argument is the index of the final
			 * fragment of this skb.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		/* Keep BQL accounting in sync with the now-empty ring. */
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
8404
8405 /* Initialize tx/rx rings for packet processing.
8406  *
8407  * The chip has been shut down and the driver detached from
8408  * the networking, so no interrupts or new tx packets will
8409  * end up in the driver.  tp->{tx,}lock are held and thus
8410  * we may not sleep.
8411  */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset interrupt tags and clear the status block. */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		/* Reset the tx ring to an empty state. */
		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		/* Reset the rx return ring. */
		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		/* Repopulate the rx producer rings with fresh buffers;
		 * on failure undo everything done so far.
		 */
		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
8445
8446 static void tg3_mem_tx_release(struct tg3 *tp)
8447 {
8448         int i;
8449
8450         for (i = 0; i < tp->irq_max; i++) {
8451                 struct tg3_napi *tnapi = &tp->napi[i];
8452
8453                 if (tnapi->tx_ring) {
8454                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8455                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8456                         tnapi->tx_ring = NULL;
8457                 }
8458
8459                 kfree(tnapi->tx_buffers);
8460                 tnapi->tx_buffers = NULL;
8461         }
8462 }
8463
8464 static int tg3_mem_tx_acquire(struct tg3 *tp)
8465 {
8466         int i;
8467         struct tg3_napi *tnapi = &tp->napi[0];
8468
8469         /* If multivector TSS is enabled, vector 0 does not handle
8470          * tx interrupts.  Don't allocate any resources for it.
8471          */
8472         if (tg3_flag(tp, ENABLE_TSS))
8473                 tnapi++;
8474
8475         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8476                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8477                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8478                 if (!tnapi->tx_buffers)
8479                         goto err_out;
8480
8481                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8482                                                     TG3_TX_RING_BYTES,
8483                                                     &tnapi->tx_desc_mapping,
8484                                                     GFP_KERNEL);
8485                 if (!tnapi->tx_ring)
8486                         goto err_out;
8487         }
8488
8489         return 0;
8490
8491 err_out:
8492         tg3_mem_tx_release(tp);
8493         return -ENOMEM;
8494 }
8495
8496 static void tg3_mem_rx_release(struct tg3 *tp)
8497 {
8498         int i;
8499
8500         for (i = 0; i < tp->irq_max; i++) {
8501                 struct tg3_napi *tnapi = &tp->napi[i];
8502
8503                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8504
8505                 if (!tnapi->rx_rcb)
8506                         continue;
8507
8508                 dma_free_coherent(&tp->pdev->dev,
8509                                   TG3_RX_RCB_RING_BYTES(tp),
8510                                   tnapi->rx_rcb,
8511                                   tnapi->rx_rcb_mapping);
8512                 tnapi->rx_rcb = NULL;
8513         }
8514 }
8515
/* Allocate the rx producer ring sets and rx return rings for each rx
 * queue.  Returns 0 or -ENOMEM; on failure everything allocated so
 * far is released via tg3_mem_rx_release().
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* rx return ring; __GFP_ZERO gives us a cleared block. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
8555
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
8560 static void tg3_free_consistent(struct tg3 *tp)
8561 {
8562         int i;
8563
8564         for (i = 0; i < tp->irq_cnt; i++) {
8565                 struct tg3_napi *tnapi = &tp->napi[i];
8566
8567                 if (tnapi->hw_status) {
8568                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8569                                           tnapi->hw_status,
8570                                           tnapi->status_mapping);
8571                         tnapi->hw_status = NULL;
8572                 }
8573         }
8574
8575         tg3_mem_rx_release(tp);
8576         tg3_mem_tx_release(tp);
8577
8578         if (tp->hw_stats) {
8579                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8580                                   tp->hw_stats, tp->stats_mapping);
8581                 tp->hw_stats = NULL;
8582         }
8583 }
8584
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block, zeroed by __GFP_ZERO. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL | __GFP_ZERO);
	if (!tp->hw_stats)
		goto err_out;

	/* One status block per interrupt vector. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL | __GFP_ZERO);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Vector 0 handles no rx under RSS, so its
			 * producer pointer stays NULL here.
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	/* Finally the tx/rx ring memory; errors unwind everything. */
	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8651
8652 #define MAX_WAIT_CNT 1000
8653
8654 /* To stop a block, clear the enable bit and poll till it
8655  * clears.  tp->lock is held.
8656  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll for the bit to clear: up to MAX_WAIT_CNT iterations of
	 * 100us each (100ms total), bailing out early if the PCI
	 * channel has gone away.
	 */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8707
8708 /* tp->lock is held. */
/* Quiesce the chip: disable interrupts and stop every rx/tx/DMA
 * block, accumulating any per-block errors.  @silent suppresses
 * per-block timeout messages.  Returns 0 or a negative error.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* If the device has dropped off the bus, skip all register
	 * access and just clear our software state.
	 */
	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks first ... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ... then the send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100us for tx mode to disable. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	/* Clear all status blocks regardless of how we got here. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8779
8780 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only the PCI command register needs saving here; it is
	 * restored by tg3_restore_pci_state() after chip reset.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8785
8786 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI needs cacheline size and latency timer
	 * re-programmed; PCIe manages these itself.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8846
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring it back
 * to a usable (but unconfigured) state.  Returns 0 on success, or a
 * negative errno if the bootcode never signals completion
 * (tg3_poll_fw()).  The exact ordering of register, config-space and
 * delay operations below is hardware-mandated; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait until any irq handler already running has seen the
	 * CHIP_RESETTING flag before touching the hardware.
	 */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter; on 5780-class parts preserve
	 * the other mode bits that survived the reset.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for the firmware/bootcode to signal completion. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-program the MAC port mode to match the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9102
9103 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9104 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9105
9106 /* tp->lock is held. */
9107 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9108 {
9109         int err;
9110
9111         tg3_stop_fw(tp);
9112
9113         tg3_write_sig_pre_reset(tp, kind);
9114
9115         tg3_abort_hw(tp, silent);
9116         err = tg3_chip_reset(tp);
9117
9118         __tg3_set_mac_addr(tp, false);
9119
9120         tg3_write_sig_legacy(tp, kind);
9121         tg3_write_sig_post_reset(tp, kind);
9122
9123         if (tp->hw_stats) {
9124                 /* Save the stats across chip resets... */
9125                 tg3_get_nstats(tp, &tp->net_stats_prev);
9126                 tg3_get_estats(tp, &tp->estats_prev);
9127
9128                 /* And make sure the next sample is new data */
9129                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9130         }
9131
9132         if (err)
9133                 return err;
9134
9135         return 0;
9136 }
9137
9138 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9139 {
9140         struct tg3 *tp = netdev_priv(dev);
9141         struct sockaddr *addr = p;
9142         int err = 0;
9143         bool skip_mac_1 = false;
9144
9145         if (!is_valid_ether_addr(addr->sa_data))
9146                 return -EADDRNOTAVAIL;
9147
9148         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9149
9150         if (!netif_running(dev))
9151                 return 0;
9152
9153         if (tg3_flag(tp, ENABLE_ASF)) {
9154                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9155
9156                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9157                 addr0_low = tr32(MAC_ADDR_0_LOW);
9158                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9159                 addr1_low = tr32(MAC_ADDR_1_LOW);
9160
9161                 /* Skip MAC addr 1 if ASF is using it. */
9162                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9163                     !(addr1_high == 0 && addr1_low == 0))
9164                         skip_mac_1 = true;
9165         }
9166         spin_lock_bh(&tp->lock);
9167         __tg3_set_mac_addr(tp, skip_mac_1);
9168         spin_unlock_bh(&tp->lock);
9169
9170         return err;
9171 }
9172
9173 /* tp->lock is held. */
9174 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9175                            dma_addr_t mapping, u32 maxlen_flags,
9176                            u32 nic_addr)
9177 {
9178         tg3_write_mem(tp,
9179                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9180                       ((u64) mapping >> 32));
9181         tg3_write_mem(tp,
9182                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9183                       ((u64) mapping & 0xffffffff));
9184         tg3_write_mem(tp,
9185                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9186                        maxlen_flags);
9187
9188         if (!tg3_flag(tp, 5705_PLUS))
9189                 tg3_write_mem(tp,
9190                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9191                               nic_addr);
9192 }
9193
9194
9195 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9196 {
9197         int i = 0;
9198
9199         if (!tg3_flag(tp, ENABLE_TSS)) {
9200                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9201                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9202                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9203         } else {
9204                 tw32(HOSTCC_TXCOL_TICKS, 0);
9205                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9206                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9207
9208                 for (; i < tp->txq_cnt; i++) {
9209                         u32 reg;
9210
9211                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9212                         tw32(reg, ec->tx_coalesce_usecs);
9213                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9214                         tw32(reg, ec->tx_max_coalesced_frames);
9215                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9216                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9217                 }
9218         }
9219
9220         for (; i < tp->irq_max - 1; i++) {
9221                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9222                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9223                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9224         }
9225 }
9226
9227 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9228 {
9229         int i = 0;
9230         u32 limit = tp->rxq_cnt;
9231
9232         if (!tg3_flag(tp, ENABLE_RSS)) {
9233                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9234                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9235                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9236                 limit--;
9237         } else {
9238                 tw32(HOSTCC_RXCOL_TICKS, 0);
9239                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9240                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9241         }
9242
9243         for (; i < limit; i++) {
9244                 u32 reg;
9245
9246                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9247                 tw32(reg, ec->rx_coalesce_usecs);
9248                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9249                 tw32(reg, ec->rx_max_coalesced_frames);
9250                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9251                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9252         }
9253
9254         for (; i < tp->irq_max - 1; i++) {
9255                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9256                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9257                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9258         }
9259 }
9260
9261 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9262 {
9263         tg3_coal_tx_init(tp, ec);
9264         tg3_coal_rx_init(tp, ec);
9265
9266         if (!tg3_flag(tp, 5705_PLUS)) {
9267                 u32 val = ec->stats_block_coalesce_usecs;
9268
9269                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9270                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9271
9272                 if (!tp->link_up)
9273                         val = 0;
9274
9275                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9276         }
9277 }
9278
9279 /* tp->lock is held. */
9280 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9281 {
9282         u32 txrcb, limit;
9283
9284         /* Disable all transmit rings but the first. */
9285         if (!tg3_flag(tp, 5705_PLUS))
9286                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9287         else if (tg3_flag(tp, 5717_PLUS))
9288                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9289         else if (tg3_flag(tp, 57765_CLASS) ||
9290                  tg3_asic_rev(tp) == ASIC_REV_5762)
9291                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9292         else
9293                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9294
9295         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9296              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9297                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9298                               BDINFO_FLAGS_DISABLED);
9299 }
9300
9301 /* tp->lock is held. */
9302 static void tg3_tx_rcbs_init(struct tg3 *tp)
9303 {
9304         int i = 0;
9305         u32 txrcb = NIC_SRAM_SEND_RCB;
9306
9307         if (tg3_flag(tp, ENABLE_TSS))
9308                 i++;
9309
9310         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9311                 struct tg3_napi *tnapi = &tp->napi[i];
9312
9313                 if (!tnapi->tx_ring)
9314                         continue;
9315
9316                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9317                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9318                                NIC_SRAM_TX_BUFFER_DESC);
9319         }
9320 }
9321
9322 /* tp->lock is held. */
9323 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9324 {
9325         u32 rxrcb, limit;
9326
9327         /* Disable all receive return rings but the first. */
9328         if (tg3_flag(tp, 5717_PLUS))
9329                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9330         else if (!tg3_flag(tp, 5705_PLUS))
9331                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9332         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9333                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9334                  tg3_flag(tp, 57765_CLASS))
9335                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9336         else
9337                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9338
9339         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9340              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9341                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9342                               BDINFO_FLAGS_DISABLED);
9343 }
9344
9345 /* tp->lock is held. */
9346 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9347 {
9348         int i = 0;
9349         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9350
9351         if (tg3_flag(tp, ENABLE_RSS))
9352                 i++;
9353
9354         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9355                 struct tg3_napi *tnapi = &tp->napi[i];
9356
9357                 if (!tnapi->rx_rcb)
9358                         continue;
9359
9360                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9361                                (tp->rx_ret_ring_mask + 1) <<
9362                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9363         }
9364 }
9365
/* tp->lock is held.
 *
 * Return all ring state to post-reset defaults: disable the unused tx
 * and rx-return RCBs, quiesce the interrupt/producer/consumer
 * mailboxes, clear the host status blocks and re-program their DMA
 * addresses, then re-initialize the RCBs that are in use.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the (only) tx producer mbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	/* Per-vector status block addresses: 8 bytes of register space
	 * for each additional vector.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9436
/* Program the RX buffer-descriptor replenish thresholds
 * (RCVBDI_*_THRESH and, on 57765+, the *_REPLENISH_LWM low-water
 * marks) for the standard and, where applicable, jumbo rings.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the per-chip-family on-die standard BD cache size. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* Threshold is the lesser of the NIC-side limit (half the BD
	 * cache, capped at rx_std_max_post) and one eighth of the
	 * host ring size (at least 1).
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo thresholds only apply to jumbo-capable, non-5780 parts. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9475
/* Bitwise CRC-32 over @buf (IEEE 802.3 / Ethernet, reflected
 * polynomial 0xedb88320, initial value all-ones, final complement).
 * Used to hash multicast addresses into the MAC hash filter.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
9499
9500 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9501 {
9502         /* accept or reject all multicast frames */
9503         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9504         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9505         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9506         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9507 }
9508
/* Program the MAC RX mode register and multicast hash filters to
 * match the net_device's flags (promiscuous / all-multi) and
 * multicast address list.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address to one of 128 filter bits: the low
		 * 7 bits of the inverted CRC-32 select a bit within
		 * the four 32-bit MAC_HASH registers.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9562
9563 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9564 {
9565         int i;
9566
9567         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9568                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9569 }
9570
9571 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9572 {
9573         int i;
9574
9575         if (!tg3_flag(tp, SUPPORT_MSIX))
9576                 return;
9577
9578         if (tp->rxq_cnt == 1) {
9579                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9580                 return;
9581         }
9582
9583         /* Validate table against current IRQ count */
9584         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9585                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9586                         break;
9587         }
9588
9589         if (i != TG3_RSS_INDIR_TBL_SIZE)
9590                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9591 }
9592
9593 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9594 {
9595         int i = 0;
9596         u32 reg = MAC_RSS_INDIR_TBL_0;
9597
9598         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9599                 u32 val = tp->rss_ind_tbl[i];
9600                 i++;
9601                 for (; i % 8; i++) {
9602                         val <<= 4;
9603                         val |= tp->rss_ind_tbl[i];
9604                 }
9605                 tw32(reg, val);
9606                 reg += 4;
9607         }
9608 }
9609
9610 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9611 {
9612         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9613                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9614         else
9615                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9616 }
9617
9618 /* tp->lock is held. */
9619 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9620 {
9621         u32 val, rdmac_mode;
9622         int i, err, limit;
9623         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9624
9625         tg3_disable_ints(tp);
9626
9627         tg3_stop_fw(tp);
9628
9629         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9630
9631         if (tg3_flag(tp, INIT_COMPLETE))
9632                 tg3_abort_hw(tp, 1);
9633
9634         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9635             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9636                 tg3_phy_pull_config(tp);
9637                 tg3_eee_pull_config(tp, NULL);
9638                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9639         }
9640
9641         /* Enable MAC control of LPI */
9642         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9643                 tg3_setup_eee(tp);
9644
9645         if (reset_phy)
9646                 tg3_phy_reset(tp);
9647
9648         err = tg3_chip_reset(tp);
9649         if (err)
9650                 return err;
9651
9652         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9653
9654         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9655                 val = tr32(TG3_CPMU_CTRL);
9656                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9657                 tw32(TG3_CPMU_CTRL, val);
9658
9659                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9660                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9661                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9662                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9663
9664                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9665                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9666                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9667                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9668
9669                 val = tr32(TG3_CPMU_HST_ACC);
9670                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9671                 val |= CPMU_HST_ACC_MACCLK_6_25;
9672                 tw32(TG3_CPMU_HST_ACC, val);
9673         }
9674
9675         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9676                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9677                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9678                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9679                 tw32(PCIE_PWR_MGMT_THRESH, val);
9680
9681                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9682                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9683
9684                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9685
9686                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9687                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9688         }
9689
9690         if (tg3_flag(tp, L1PLLPD_EN)) {
9691                 u32 grc_mode = tr32(GRC_MODE);
9692
9693                 /* Access the lower 1K of PL PCIE block registers. */
9694                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9695                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9696
9697                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9698                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9699                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9700
9701                 tw32(GRC_MODE, grc_mode);
9702         }
9703
9704         if (tg3_flag(tp, 57765_CLASS)) {
9705                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9706                         u32 grc_mode = tr32(GRC_MODE);
9707
9708                         /* Access the lower 1K of PL PCIE block registers. */
9709                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9710                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9711
9712                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9713                                    TG3_PCIE_PL_LO_PHYCTL5);
9714                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9715                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9716
9717                         tw32(GRC_MODE, grc_mode);
9718                 }
9719
9720                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9721                         u32 grc_mode;
9722
9723                         /* Fix transmit hangs */
9724                         val = tr32(TG3_CPMU_PADRNG_CTL);
9725                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9726                         tw32(TG3_CPMU_PADRNG_CTL, val);
9727
9728                         grc_mode = tr32(GRC_MODE);
9729
9730                         /* Access the lower 1K of DL PCIE block registers. */
9731                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9732                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9733
9734                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9735                                    TG3_PCIE_DL_LO_FTSMAX);
9736                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9737                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9738                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9739
9740                         tw32(GRC_MODE, grc_mode);
9741                 }
9742
9743                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9744                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9745                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9746                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9747         }
9748
9749         /* This works around an issue with Athlon chipsets on
9750          * B3 tigon3 silicon.  This bit has no effect on any
9751          * other revision.  But do not set this on PCI Express
9752          * chips and don't even touch the clocks if the CPMU is present.
9753          */
9754         if (!tg3_flag(tp, CPMU_PRESENT)) {
9755                 if (!tg3_flag(tp, PCI_EXPRESS))
9756                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9757                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9758         }
9759
9760         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9761             tg3_flag(tp, PCIX_MODE)) {
9762                 val = tr32(TG3PCI_PCISTATE);
9763                 val |= PCISTATE_RETRY_SAME_DMA;
9764                 tw32(TG3PCI_PCISTATE, val);
9765         }
9766
9767         if (tg3_flag(tp, ENABLE_APE)) {
9768                 /* Allow reads and writes to the
9769                  * APE register and memory space.
9770                  */
9771                 val = tr32(TG3PCI_PCISTATE);
9772                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9773                        PCISTATE_ALLOW_APE_SHMEM_WR |
9774                        PCISTATE_ALLOW_APE_PSPACE_WR;
9775                 tw32(TG3PCI_PCISTATE, val);
9776         }
9777
9778         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9779                 /* Enable some hw fixes.  */
9780                 val = tr32(TG3PCI_MSI_DATA);
9781                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9782                 tw32(TG3PCI_MSI_DATA, val);
9783         }
9784
9785         /* Descriptor ring init may make accesses to the
9786          * NIC SRAM area to setup the TX descriptors, so we
9787          * can only do this after the hardware has been
9788          * successfully reset.
9789          */
9790         err = tg3_init_rings(tp);
9791         if (err)
9792                 return err;
9793
9794         if (tg3_flag(tp, 57765_PLUS)) {
9795                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9796                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9797                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9798                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9799                 if (!tg3_flag(tp, 57765_CLASS) &&
9800                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9801                     tg3_asic_rev(tp) != ASIC_REV_5762)
9802                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9803                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9804         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9805                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9806                 /* This value is determined during the probe time DMA
9807                  * engine test, tg3_test_dma.
9808                  */
9809                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9810         }
9811
9812         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9813                           GRC_MODE_4X_NIC_SEND_RINGS |
9814                           GRC_MODE_NO_TX_PHDR_CSUM |
9815                           GRC_MODE_NO_RX_PHDR_CSUM);
9816         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9817
9818         /* Pseudo-header checksum is done by hardware logic and not
9819          * the offload processers, so make the chip do the pseudo-
9820          * header checksums on receive.  For transmit it is more
9821          * convenient to do the pseudo-header checksum in software
9822          * as Linux does that on transmit for us in all cases.
9823          */
9824         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9825
9826         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9827         if (tp->rxptpctl)
9828                 tw32(TG3_RX_PTP_CTL,
9829                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9830
9831         if (tg3_flag(tp, PTP_CAPABLE))
9832                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9833
9834         tw32(GRC_MODE, tp->grc_mode | val);
9835
9836         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9837         val = tr32(GRC_MISC_CFG);
9838         val &= ~0xff;
9839         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9840         tw32(GRC_MISC_CFG, val);
9841
9842         /* Initialize MBUF/DESC pool. */
9843         if (tg3_flag(tp, 5750_PLUS)) {
9844                 /* Do nothing.  */
9845         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9846                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9847                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9848                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9849                 else
9850                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9851                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9852                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9853         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9854                 int fw_len;
9855
9856                 fw_len = tp->fw_len;
9857                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9858                 tw32(BUFMGR_MB_POOL_ADDR,
9859                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9860                 tw32(BUFMGR_MB_POOL_SIZE,
9861                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9862         }
9863
9864         if (tp->dev->mtu <= ETH_DATA_LEN) {
9865                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9866                      tp->bufmgr_config.mbuf_read_dma_low_water);
9867                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9868                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9869                 tw32(BUFMGR_MB_HIGH_WATER,
9870                      tp->bufmgr_config.mbuf_high_water);
9871         } else {
9872                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9873                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9874                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9875                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9876                 tw32(BUFMGR_MB_HIGH_WATER,
9877                      tp->bufmgr_config.mbuf_high_water_jumbo);
9878         }
9879         tw32(BUFMGR_DMA_LOW_WATER,
9880              tp->bufmgr_config.dma_low_water);
9881         tw32(BUFMGR_DMA_HIGH_WATER,
9882              tp->bufmgr_config.dma_high_water);
9883
9884         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9885         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9886                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9887         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9888             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9889             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9890                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9891         tw32(BUFMGR_MODE, val);
9892         for (i = 0; i < 2000; i++) {
9893                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9894                         break;
9895                 udelay(10);
9896         }
9897         if (i >= 2000) {
9898                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9899                 return -ENODEV;
9900         }
9901
9902         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9903                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9904
9905         tg3_setup_rxbd_thresholds(tp);
9906
9907         /* Initialize TG3_BDINFO's at:
9908          *  RCVDBDI_STD_BD:     standard eth size rx ring
9909          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9910          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9911          *
9912          * like so:
9913          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9914          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9915          *                              ring attribute flags
9916          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9917          *
9918          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9919          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9920          *
9921          * The size of each ring is fixed in the firmware, but the location is
9922          * configurable.
9923          */
9924         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9925              ((u64) tpr->rx_std_mapping >> 32));
9926         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9927              ((u64) tpr->rx_std_mapping & 0xffffffff));
9928         if (!tg3_flag(tp, 5717_PLUS))
9929                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9930                      NIC_SRAM_RX_BUFFER_DESC);
9931
9932         /* Disable the mini ring */
9933         if (!tg3_flag(tp, 5705_PLUS))
9934                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9935                      BDINFO_FLAGS_DISABLED);
9936
9937         /* Program the jumbo buffer descriptor ring control
9938          * blocks on those devices that have them.
9939          */
9940         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9941             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9942
9943                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9944                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9945                              ((u64) tpr->rx_jmb_mapping >> 32));
9946                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9947                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9948                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9949                               BDINFO_FLAGS_MAXLEN_SHIFT;
9950                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9951                              val | BDINFO_FLAGS_USE_EXT_RECV);
9952                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9953                             tg3_flag(tp, 57765_CLASS) ||
9954                             tg3_asic_rev(tp) == ASIC_REV_5762)
9955                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9956                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9957                 } else {
9958                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9959                              BDINFO_FLAGS_DISABLED);
9960                 }
9961
9962                 if (tg3_flag(tp, 57765_PLUS)) {
9963                         val = TG3_RX_STD_RING_SIZE(tp);
9964                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9965                         val |= (TG3_RX_STD_DMA_SZ << 2);
9966                 } else
9967                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9968         } else
9969                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9970
9971         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9972
9973         tpr->rx_std_prod_idx = tp->rx_pending;
9974         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9975
9976         tpr->rx_jmb_prod_idx =
9977                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9978         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9979
9980         tg3_rings_reset(tp);
9981
9982         /* Initialize MAC address and backoff seed. */
9983         __tg3_set_mac_addr(tp, false);
9984
9985         /* MTU + ethernet header + FCS + optional VLAN tag */
9986         tw32(MAC_RX_MTU_SIZE,
9987              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9988
9989         /* The slot time is changed by tg3_setup_phy if we
9990          * run at gigabit with half duplex.
9991          */
9992         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9993               (6 << TX_LENGTHS_IPG_SHIFT) |
9994               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9995
9996         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9997             tg3_asic_rev(tp) == ASIC_REV_5762)
9998                 val |= tr32(MAC_TX_LENGTHS) &
9999                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10000                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10001
10002         tw32(MAC_TX_LENGTHS, val);
10003
10004         /* Receive rules. */
10005         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10006         tw32(RCVLPC_CONFIG, 0x0181);
10007
10008         /* Calculate RDMAC_MODE setting early, we need it to determine
10009          * the RCVLPC_STATE_ENABLE mask.
10010          */
10011         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10012                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10013                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10014                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10015                       RDMAC_MODE_LNGREAD_ENAB);
10016
10017         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10018                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10019
10020         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10021             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10022             tg3_asic_rev(tp) == ASIC_REV_57780)
10023                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10024                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10025                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10026
10027         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10028             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10029                 if (tg3_flag(tp, TSO_CAPABLE) &&
10030                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10031                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10032                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10033                            !tg3_flag(tp, IS_5788)) {
10034                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10035                 }
10036         }
10037
10038         if (tg3_flag(tp, PCI_EXPRESS))
10039                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10040
10041         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10042                 tp->dma_limit = 0;
10043                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10044                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10045                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10046                 }
10047         }
10048
10049         if (tg3_flag(tp, HW_TSO_1) ||
10050             tg3_flag(tp, HW_TSO_2) ||
10051             tg3_flag(tp, HW_TSO_3))
10052                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10053
10054         if (tg3_flag(tp, 57765_PLUS) ||
10055             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10056             tg3_asic_rev(tp) == ASIC_REV_57780)
10057                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10058
10059         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10060             tg3_asic_rev(tp) == ASIC_REV_5762)
10061                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10062
10063         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10064             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10065             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10066             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10067             tg3_flag(tp, 57765_PLUS)) {
10068                 u32 tgtreg;
10069
10070                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10071                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10072                 else
10073                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10074
10075                 val = tr32(tgtreg);
10076                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10077                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10078                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10079                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10080                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10081                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10082                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10083                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10084                 }
10085                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10086         }
10087
10088         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10089             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10090             tg3_asic_rev(tp) == ASIC_REV_5762) {
10091                 u32 tgtreg;
10092
10093                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10094                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10095                 else
10096                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10097
10098                 val = tr32(tgtreg);
10099                 tw32(tgtreg, val |
10100                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10101                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10102         }
10103
10104         /* Receive/send statistics. */
10105         if (tg3_flag(tp, 5750_PLUS)) {
10106                 val = tr32(RCVLPC_STATS_ENABLE);
10107                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10108                 tw32(RCVLPC_STATS_ENABLE, val);
10109         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10110                    tg3_flag(tp, TSO_CAPABLE)) {
10111                 val = tr32(RCVLPC_STATS_ENABLE);
10112                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10113                 tw32(RCVLPC_STATS_ENABLE, val);
10114         } else {
10115                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10116         }
10117         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10118         tw32(SNDDATAI_STATSENAB, 0xffffff);
10119         tw32(SNDDATAI_STATSCTRL,
10120              (SNDDATAI_SCTRL_ENABLE |
10121               SNDDATAI_SCTRL_FASTUPD));
10122
10123         /* Setup host coalescing engine. */
10124         tw32(HOSTCC_MODE, 0);
10125         for (i = 0; i < 2000; i++) {
10126                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10127                         break;
10128                 udelay(10);
10129         }
10130
10131         __tg3_set_coalesce(tp, &tp->coal);
10132
10133         if (!tg3_flag(tp, 5705_PLUS)) {
10134                 /* Status/statistics block address.  See tg3_timer,
10135                  * the tg3_periodic_fetch_stats call there, and
10136                  * tg3_get_stats to see how this works for 5705/5750 chips.
10137                  */
10138                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10139                      ((u64) tp->stats_mapping >> 32));
10140                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10141                      ((u64) tp->stats_mapping & 0xffffffff));
10142                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10143
10144                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10145
10146                 /* Clear statistics and status block memory areas */
10147                 for (i = NIC_SRAM_STATS_BLK;
10148                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10149                      i += sizeof(u32)) {
10150                         tg3_write_mem(tp, i, 0);
10151                         udelay(40);
10152                 }
10153         }
10154
10155         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10156
10157         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10158         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10159         if (!tg3_flag(tp, 5705_PLUS))
10160                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10161
10162         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10163                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10164                 /* reset to prevent losing 1st rx packet intermittently */
10165                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10166                 udelay(10);
10167         }
10168
10169         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10170                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10171                         MAC_MODE_FHDE_ENABLE;
10172         if (tg3_flag(tp, ENABLE_APE))
10173                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10174         if (!tg3_flag(tp, 5705_PLUS) &&
10175             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10176             tg3_asic_rev(tp) != ASIC_REV_5700)
10177                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10178         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10179         udelay(40);
10180
10181         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10182          * If TG3_FLAG_IS_NIC is zero, we should read the
10183          * register to preserve the GPIO settings for LOMs. The GPIOs,
10184          * whether used as inputs or outputs, are set by boot code after
10185          * reset.
10186          */
10187         if (!tg3_flag(tp, IS_NIC)) {
10188                 u32 gpio_mask;
10189
10190                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10191                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10192                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10193
10194                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10195                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10196                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10197
10198                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10199                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10200
10201                 tp->grc_local_ctrl &= ~gpio_mask;
10202                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10203
10204                 /* GPIO1 must be driven high for eeprom write protect */
10205                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10206                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10207                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10208         }
10209         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10210         udelay(100);
10211
10212         if (tg3_flag(tp, USING_MSIX)) {
10213                 val = tr32(MSGINT_MODE);
10214                 val |= MSGINT_MODE_ENABLE;
10215                 if (tp->irq_cnt > 1)
10216                         val |= MSGINT_MODE_MULTIVEC_EN;
10217                 if (!tg3_flag(tp, 1SHOT_MSI))
10218                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10219                 tw32(MSGINT_MODE, val);
10220         }
10221
10222         if (!tg3_flag(tp, 5705_PLUS)) {
10223                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10224                 udelay(40);
10225         }
10226
10227         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10228                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10229                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10230                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10231                WDMAC_MODE_LNGREAD_ENAB);
10232
10233         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10234             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10235                 if (tg3_flag(tp, TSO_CAPABLE) &&
10236                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10237                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10238                         /* nothing */
10239                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10240                            !tg3_flag(tp, IS_5788)) {
10241                         val |= WDMAC_MODE_RX_ACCEL;
10242                 }
10243         }
10244
10245         /* Enable host coalescing bug fix */
10246         if (tg3_flag(tp, 5755_PLUS))
10247                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10248
10249         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10250                 val |= WDMAC_MODE_BURST_ALL_DATA;
10251
10252         tw32_f(WDMAC_MODE, val);
10253         udelay(40);
10254
10255         if (tg3_flag(tp, PCIX_MODE)) {
10256                 u16 pcix_cmd;
10257
10258                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10259                                      &pcix_cmd);
10260                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10261                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10262                         pcix_cmd |= PCI_X_CMD_READ_2K;
10263                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10264                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10265                         pcix_cmd |= PCI_X_CMD_READ_2K;
10266                 }
10267                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10268                                       pcix_cmd);
10269         }
10270
10271         tw32_f(RDMAC_MODE, rdmac_mode);
10272         udelay(40);
10273
10274         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10275             tg3_asic_rev(tp) == ASIC_REV_5720) {
10276                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10277                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10278                                 break;
10279                 }
10280                 if (i < TG3_NUM_RDMA_CHANNELS) {
10281                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10282                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10283                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10284                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10285                 }
10286         }
10287
10288         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10289         if (!tg3_flag(tp, 5705_PLUS))
10290                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10291
10292         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10293                 tw32(SNDDATAC_MODE,
10294                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10295         else
10296                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10297
10298         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10299         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10300         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10301         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10302                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10303         tw32(RCVDBDI_MODE, val);
10304         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10305         if (tg3_flag(tp, HW_TSO_1) ||
10306             tg3_flag(tp, HW_TSO_2) ||
10307             tg3_flag(tp, HW_TSO_3))
10308                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10309         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10310         if (tg3_flag(tp, ENABLE_TSS))
10311                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10312         tw32(SNDBDI_MODE, val);
10313         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10314
10315         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10316                 err = tg3_load_5701_a0_firmware_fix(tp);
10317                 if (err)
10318                         return err;
10319         }
10320
10321         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10322                 /* Ignore any errors for the firmware download. If download
10323                  * fails, the device will operate with EEE disabled
10324                  */
10325                 tg3_load_57766_firmware(tp);
10326         }
10327
10328         if (tg3_flag(tp, TSO_CAPABLE)) {
10329                 err = tg3_load_tso_firmware(tp);
10330                 if (err)
10331                         return err;
10332         }
10333
10334         tp->tx_mode = TX_MODE_ENABLE;
10335
10336         if (tg3_flag(tp, 5755_PLUS) ||
10337             tg3_asic_rev(tp) == ASIC_REV_5906)
10338                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10339
10340         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10341             tg3_asic_rev(tp) == ASIC_REV_5762) {
10342                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10343                 tp->tx_mode &= ~val;
10344                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10345         }
10346
10347         tw32_f(MAC_TX_MODE, tp->tx_mode);
10348         udelay(100);
10349
10350         if (tg3_flag(tp, ENABLE_RSS)) {
10351                 tg3_rss_write_indir_tbl(tp);
10352
10353                 /* Setup the "secret" hash key. */
10354                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10355                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10356                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10357                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10358                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10359                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10360                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10361                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10362                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10363                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10364         }
10365
10366         tp->rx_mode = RX_MODE_ENABLE;
10367         if (tg3_flag(tp, 5755_PLUS))
10368                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10369
10370         if (tg3_flag(tp, ENABLE_RSS))
10371                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10372                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10373                                RX_MODE_RSS_IPV6_HASH_EN |
10374                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10375                                RX_MODE_RSS_IPV4_HASH_EN |
10376                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10377
10378         tw32_f(MAC_RX_MODE, tp->rx_mode);
10379         udelay(10);
10380
10381         tw32(MAC_LED_CTRL, tp->led_ctrl);
10382
10383         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10384         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10385                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10386                 udelay(10);
10387         }
10388         tw32_f(MAC_RX_MODE, tp->rx_mode);
10389         udelay(10);
10390
10391         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10392                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10393                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10394                         /* Set drive transmission level to 1.2V  */
10395                         /* only if the signal pre-emphasis bit is not set  */
10396                         val = tr32(MAC_SERDES_CFG);
10397                         val &= 0xfffff000;
10398                         val |= 0x880;
10399                         tw32(MAC_SERDES_CFG, val);
10400                 }
10401                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10402                         tw32(MAC_SERDES_CFG, 0x616000);
10403         }
10404
10405         /* Prevent chip from dropping frames when flow control
10406          * is enabled.
10407          */
10408         if (tg3_flag(tp, 57765_CLASS))
10409                 val = 1;
10410         else
10411                 val = 2;
10412         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10413
10414         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10415             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10416                 /* Use hardware link auto-negotiation */
10417                 tg3_flag_set(tp, HW_AUTONEG);
10418         }
10419
10420         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10421             tg3_asic_rev(tp) == ASIC_REV_5714) {
10422                 u32 tmp;
10423
10424                 tmp = tr32(SERDES_RX_CTRL);
10425                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10426                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10427                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10428                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10429         }
10430
10431         if (!tg3_flag(tp, USE_PHYLIB)) {
10432                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10433                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10434
10435                 err = tg3_setup_phy(tp, false);
10436                 if (err)
10437                         return err;
10438
10439                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10440                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10441                         u32 tmp;
10442
10443                         /* Clear CRC stats. */
10444                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10445                                 tg3_writephy(tp, MII_TG3_TEST1,
10446                                              tmp | MII_TG3_TEST1_CRC_EN);
10447                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10448                         }
10449                 }
10450         }
10451
10452         __tg3_set_rx_mode(tp->dev);
10453
10454         /* Initialize receive rules. */
10455         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10456         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10457         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10458         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10459
10460         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10461                 limit = 8;
10462         else
10463                 limit = 16;
10464         if (tg3_flag(tp, ENABLE_ASF))
10465                 limit -= 4;
10466         switch (limit) {
10467         case 16:
10468                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10469         case 15:
10470                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10471         case 14:
10472                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10473         case 13:
10474                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10475         case 12:
10476                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10477         case 11:
10478                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10479         case 10:
10480                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10481         case 9:
10482                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10483         case 8:
10484                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10485         case 7:
10486                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10487         case 6:
10488                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10489         case 5:
10490                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10491         case 4:
10492                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10493         case 3:
10494                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10495         case 2:
10496         case 1:
10497
10498         default:
10499                 break;
10500         }
10501
10502         if (tg3_flag(tp, ENABLE_APE))
10503                 /* Write our heartbeat update interval to APE. */
10504                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10505                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10506
10507         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10508
10509         return 0;
10510 }
10511
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	/* Reset the indirect register memory window to a known base. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10530
10531 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10532 {
10533         int i;
10534
10535         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10536                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10537
10538                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10539                 off += len;
10540
10541                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10542                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10543                         memset(ocir, 0, TG3_OCIR_LEN);
10544         }
10545 }
10546
10547 /* sysfs attributes for hwmon */
10548 static ssize_t tg3_show_temp(struct device *dev,
10549                              struct device_attribute *devattr, char *buf)
10550 {
10551         struct pci_dev *pdev = to_pci_dev(dev);
10552         struct net_device *netdev = pci_get_drvdata(pdev);
10553         struct tg3 *tp = netdev_priv(netdev);
10554         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10555         u32 temperature;
10556
10557         spin_lock_bh(&tp->lock);
10558         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10559                                 sizeof(temperature));
10560         spin_unlock_bh(&tp->lock);
10561         return sprintf(buf, "%u\n", temperature);
10562 }
10563
10564
/* Read-only hwmon sensors, all backed by tg3_show_temp(); the final
 * macro argument is the APE scratchpad offset passed via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute table registered as a sysfs group on the
 * PCI device by tg3_hwmon_open().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10582
10583 static void tg3_hwmon_close(struct tg3 *tp)
10584 {
10585         if (tp->hwmon_dev) {
10586                 hwmon_device_unregister(tp->hwmon_dev);
10587                 tp->hwmon_dev = NULL;
10588                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10589         }
10590 }
10591
/* Register temperature sensors with hwmon if the APE scratchpad
 * advertises any active sensor-data records.  Failure is non-fatal:
 * the NIC works without hwmon, so errors are only logged.
 */
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	/* Sum header+data sizes of active records; zero total means no
	 * sensor data exists, so skip hwmon registration entirely.
	 */
	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	/* NOTE(review): the attribute group is attached to the PCI device
	 * rather than the hwmon device; hwmon_device_register_with_groups()
	 * would avoid that split — confirm against the hwmon core in use.
	 */
	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
10626
10627
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * (high:low) statistic PSTAT.  An unsigned wrap of the low word
 * (low < just-added value) carries one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10634
/* Fold the chip's 32-bit MAC/RCVLPC statistics counters into the
 * 64-bit accumulators in tp->hw_stats.  Called once per second from
 * tg3_timer() under tp->lock; does nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA workaround: once enough packets have gone out,
	 * clear the extra RDMA-channel bit that tg3_reset_hw() set when
	 * it raised the 5719_5720_RDMA_BUG flag.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* NOTE(review): on these chips discards are presumably
		 * approximated by mbuf low-watermark attention events;
		 * each observed (and acked) event counts as one discard.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10699
10700 static void tg3_chk_missed_msi(struct tg3 *tp)
10701 {
10702         u32 i;
10703
10704         for (i = 0; i < tp->irq_cnt; i++) {
10705                 struct tg3_napi *tnapi = &tp->napi[i];
10706
10707                 if (tg3_has_work(tnapi)) {
10708                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10709                             tnapi->last_tx_cons == tnapi->tx_cons) {
10710                                 if (tnapi->chk_msi_cnt < 1) {
10711                                         tnapi->chk_msi_cnt++;
10712                                         return;
10713                                 }
10714                                 tg3_msi(0, tnapi);
10715                         }
10716                 }
10717                 tnapi->chk_msi_cnt = 0;
10718                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10719                 tnapi->last_tx_cons = tnapi->tx_cons;
10720         }
10721 }
10722
/* Periodic service routine, re-armed every tp->timer_offset jiffies on
 * every exit path.  Handles missed-MSI detection, statistics fetching,
 * link polling and the ASF heartbeat.  Work is skipped (but the timer
 * still re-armed) while an IRQ sync or reset task is in flight.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine unexpectedly disabled: defer a full
		 * chip reset to process context via the reset task.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Clear then restore the port mode
					 * bits — presumably to force the
					 * SERDES state machine to resync.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10854
10855 static void tg3_timer_init(struct tg3 *tp)
10856 {
10857         if (tg3_flag(tp, TAGGED_STATUS) &&
10858             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10859             !tg3_flag(tp, 57765_CLASS))
10860                 tp->timer_offset = HZ;
10861         else
10862                 tp->timer_offset = HZ / 10;
10863
10864         BUG_ON(tp->timer_offset > HZ);
10865
10866         tp->timer_multiplier = (HZ / tp->timer_offset);
10867         tp->asf_multiplier = (HZ / tp->timer_offset) *
10868                              TG3_FW_UPDATE_FREQ_SEC;
10869
10870         init_timer(&tp->timer);
10871         tp->timer.data = (unsigned long) tp;
10872         tp->timer.function = tg3_timer;
10873 }
10874
10875 static void tg3_timer_start(struct tg3 *tp)
10876 {
10877         tp->asf_counter   = tp->asf_multiplier;
10878         tp->timer_counter = tp->timer_multiplier;
10879
10880         tp->timer.expires = jiffies + tp->timer_offset;
10881         add_timer(&tp->timer);
10882 }
10883
/* Deactivate the service timer, waiting for a concurrently-running
 * tg3_timer() on another CPU to finish (del_timer_sync semantics).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10888
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success; on failure the device is halted and closed,
 * and the tg3_init_hw() error is returned.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the lock across dev_close() — the sparse
		 * __releases/__acquires annotations above document this
		 * temporary unlock/relock on the error path.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10912
/* Workqueue handler for deferred chip resets (scheduled via
 * tg3_reset_task_schedule(), e.g. from tg3_timer() when the write DMA
 * engine is found disabled).  Runs in process context; clears
 * RESET_TASK_PENDING on every exit path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled — nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* NOTE(review): presumably a TX hang caused by mailbox
		 * write reordering — switch to flushed mailbox writes
		 * before re-initializing.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only after a successful re-init. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10956
10957 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10958 {
10959         irq_handler_t fn;
10960         unsigned long flags;
10961         char *name;
10962         struct tg3_napi *tnapi = &tp->napi[irq_num];
10963
10964         if (tp->irq_cnt == 1)
10965                 name = tp->dev->name;
10966         else {
10967                 name = &tnapi->irq_lbl[0];
10968                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10969                 name[IFNAMSIZ-1] = 0;
10970         }
10971
10972         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10973                 fn = tg3_msi;
10974                 if (tg3_flag(tp, 1SHOT_MSI))
10975                         fn = tg3_msi_1shot;
10976                 flags = 0;
10977         } else {
10978                 fn = tg3_interrupt;
10979                 if (tg3_flag(tp, TAGGED_STATUS))
10980                         fn = tg3_interrupt_tagged;
10981                 flags = IRQF_SHARED;
10982         }
10983
10984         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10985 }
10986
/* Verify the chip can actually deliver an interrupt: temporarily swap
 * in tg3_test_isr on vector 0, force a coalescing-now event, and poll
 * the interrupt mailbox for up to ~50ms.  The normal handler is
 * restored before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate coalescing event so an interrupt fires. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero mailbox value or the masked-PCI-int bit
		 * means the interrupt made it through.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler for vector 0. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11060
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error (including a failure to
 * restore INTx) is returned to the caller.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11121
11122 static int tg3_request_firmware(struct tg3 *tp)
11123 {
11124         const struct tg3_firmware_hdr *fw_hdr;
11125
11126         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11127                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11128                            tp->fw_needed);
11129                 return -ENOENT;
11130         }
11131
11132         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11133
11134         /* Firmware blob starts with version numbers, followed by
11135          * start address and _full_ length including BSS sections
11136          * (which must be longer than the actual data, of course
11137          */
11138
11139         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11140         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11141                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11142                            tp->fw_len, tp->fw_needed);
11143                 release_firmware(tp->fw);
11144                 tp->fw = NULL;
11145                 return -EINVAL;
11146         }
11147
11148         /* We no longer need firmware; we have it. */
11149         tp->fw_needed = NULL;
11150         return 0;
11151 }
11152
11153 static u32 tg3_irq_count(struct tg3 *tp)
11154 {
11155         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11156
11157         if (irq_cnt > 1) {
11158                 /* We want as many rx rings enabled as there are cpus.
11159                  * In multiqueue MSI-X mode, the first MSI-X vector
11160                  * only deals with link interrupts, etc, so we add
11161                  * one to the number of vectors we are requesting.
11162                  */
11163                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11164         }
11165
11166         return irq_cnt;
11167 }
11168
/* Try to put the device into MSI-X mode.  Derives rx/tx queue counts
 * from the module requests (or defaults), allocates the vectors, and
 * falls back gracefully when fewer vectors are granted.  Returns true
 * if MSI-X was successfully enabled.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	/* No explicit rx queue request: default to one rx ring per cpu,
	 * clamped to what the hardware supports.
	 */
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available; rc holds
		 * the available count.  Retry with that number and shrink
		 * the queue counts to match.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		/* Vector 0 serves link interrupts only (see tg3_irq_count),
		 * so the rx rings get the remaining vectors.
		 */
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector means no multiqueue; skip RSS/TSS setup. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11229
/* Select and enable the interrupt delivery mode (MSI-X, MSI, or legacy
 * INTx) and program the chip's MSI mode register accordingly.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	/* Prefer MSI-X, fall back to MSI, else remain on INTx. */
	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot MSI is opt-in via the 1SHOT_MSI flag;
		 * otherwise explicitly disable it in hardware.
		 */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	/* Single vector: collapse to one rx and one tx queue. */
	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11268
11269 static void tg3_ints_fini(struct tg3 *tp)
11270 {
11271         if (tg3_flag(tp, USING_MSIX))
11272                 pci_disable_msix(tp->pdev);
11273         else if (tg3_flag(tp, USING_MSI))
11274                 pci_disable_msi(tp->pdev);
11275         tg3_flag_clear(tp, USING_MSI);
11276         tg3_flag_clear(tp, USING_MSIX);
11277         tg3_flag_clear(tp, ENABLE_RSS);
11278         tg3_flag_clear(tp, ENABLE_TSS);
11279 }
11280
/* Bring the device fully up: set up interrupt vectors and DMA memory,
 * request irqs, program the hardware, and start the transmit queues.
 * @reset_phy is passed through to tg3_init_hw(); @test_irq runs the
 * MSI self-test; @init distinguishes a first open (APE state change,
 * tg3_ptp_init) from a restart (tg3_ptp_resume).  Returns 0 or a
 * negative errno, unwinding all acquired resources on failure.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind only the irqs requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* The interrupt test failed and could not be
			 * recovered; shut the hardware back down.
			 */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		/* Pre-57765 MSI devices enable one-shot MSI here. */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11399
/* Full teardown counterpart of tg3_start(): stop timers and the net
 * stack, halt the hardware under the full lock, then release irqs,
 * NAPI contexts and DMA memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free the irqs in reverse order of their request. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11434
/* ndo_open handler: load any required firmware, power the chip up, and
 * bring the interface fully up via tg3_start().
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* On 57766 a load failure only costs the EEE
			 * capability; it is not fatal to the open.
			 */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot be brought up without firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Elsewhere a load failure just toggles TSO off. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Start failed; drop aux power and park the chip in D3hot. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		/* PTP is optional; registration failure is not fatal. */
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11492
/* ndo_stop handler: tear down PTP and the hardware, then prepare the
 * chip for its low-power state.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down_prepare(tp);

	tg3_carrier_off(tp);

	return 0;
}
11511
11512 static inline u64 get_stat64(tg3_stat64_t *val)
11513 {
11514        return ((u64)val->high << 32) | ((u64)val->low);
11515 }
11516
/* Return the cumulative receive CRC error count.  On 5700/5701 copper
 * devices the count comes from a PHY test register and is accumulated
 * into a software total; all other devices use the rx_fcs_errors
 * hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		/* Fold into the running software total. */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11540
/* Add one live hardware counter on top of the snapshot saved at the
 * last close.  Expects 'estats', 'old_estats' and 'hw_stats' locals to
 * be in scope at the expansion site (see tg3_get_estats).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11544
/* Fill @estats with cumulative ethtool statistics: the snapshot kept
 * in tp->estats_prev plus the live hardware counters, one ESTAT_ADD()
 * per struct member.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA write / receive list counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* DMA read / send data counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11628
/* Fill the standard rtnl_link_stats64 from the hardware statistics
 * block, added on top of the snapshot saved at the last close
 * (tp->net_stats_prev).
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Packet counts are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11684
/* ethtool get_regs_len: the register dump is a fixed-size block. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11689
/* ethtool get_regs: dump the legacy register block into @_p.  The
 * buffer is zeroed first; the dump is skipped entirely while the PHY
 * is in its low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11708
/* ethtool get_eeprom_len: report the detected NVRAM size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11715
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in aligned 4-byte
 * big-endian words, so an unaligned head and tail are handled as
 * partial-word copies.  eeprom->len is rewritten to the number of
 * bytes actually copied, even on a mid-read failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the enclosing aligned word, copy the wanted part. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how many bytes made it before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11775
/* ethtool set_eeprom: write eeprom->len bytes from @data to NVRAM at
 * eeprom->offset.  NVRAM is written in aligned 4-byte words, so when
 * the request is unaligned the bordering words are read back first and
 * merged with the user data in a temporary buffer before writing.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		/* Preserve the bytes before the write window. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		/* Preserve the bytes after the write window. */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved border words with the user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11831
/* ethtool get_settings: report supported/advertised link modes and the
 * current speed, duplex and MDI-X state.  When a phylib PHY is
 * attached the query is delegated to phylib.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper devices also do 10/100 over TP; serdes is fibre-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Translate the rx/tx flow-control config into the
		 * corresponding Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* Link down (or interface down): nothing meaningful to
		 * report for speed/duplex/MDI-X.
		 */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11897
/* ethtool set_settings: validate and apply a new link configuration.
 * With phylib attached the request is delegated; otherwise the
 * requested advertising mask or forced speed/duplex is checked against
 * what this PHY type (copper vs. serdes, 10/100-only) allows, then
 * applied under the full lock.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode requires an explicit duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisement bits this PHY type
		 * can accept, then reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for the stored config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: serdes only does 1000/full; copper here
		 * only accepts forced 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	/* Renegotiate immediately if the interface is up. */
	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
11988
/* ethtool ->get_drvinfo(): report driver name/version, firmware version
 * string and PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
11998
11999 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12000 {
12001         struct tg3 *tp = netdev_priv(dev);
12002
12003         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12004                 wol->supported = WAKE_MAGIC;
12005         else
12006                 wol->supported = 0;
12007         wol->wolopts = 0;
12008         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12009                 wol->wolopts = WAKE_MAGIC;
12010         memset(&wol->sopass, 0, sizeof(wol->sopass));
12011 }
12012
12013 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12014 {
12015         struct tg3 *tp = netdev_priv(dev);
12016         struct device *dp = &tp->pdev->dev;
12017
12018         if (wol->wolopts & ~WAKE_MAGIC)
12019                 return -EINVAL;
12020         if ((wol->wolopts & WAKE_MAGIC) &&
12021             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12022                 return -EINVAL;
12023
12024         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12025
12026         spin_lock_bh(&tp->lock);
12027         if (device_may_wakeup(dp))
12028                 tg3_flag_set(tp, WOL_ENABLE);
12029         else
12030                 tg3_flag_clear(tp, WOL_ENABLE);
12031         spin_unlock_bh(&tp->lock);
12032
12033         return 0;
12034 }
12035
/* ethtool ->get_msglevel(): return the current debug message mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
12041
/* ethtool ->set_msglevel(): set the debug message mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
12047
/* ethtool ->nway_reset(): restart link autonegotiation.
 * Returns 0 on success, -EAGAIN if the device/PHY is not ready,
 * -EINVAL if the link type cannot renegotiate.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; presumably the first
		 * read flushes a stale/latched value on some PHYs -
		 * confirm before collapsing this into a single read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12083
12084 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12085 {
12086         struct tg3 *tp = netdev_priv(dev);
12087
12088         ering->rx_max_pending = tp->rx_std_ring_mask;
12089         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12090                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12091         else
12092                 ering->rx_jumbo_max_pending = 0;
12093
12094         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12095
12096         ering->rx_pending = tp->rx_pending;
12097         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12098                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12099         else
12100                 ering->rx_jumbo_pending = 0;
12101
12102         ering->tx_pending = tp->napi[0].tx_pending;
12103 }
12104
/* ethtool ->set_ringparam(): resize the RX/jumbo/TX rings.  The device
 * is quiesced, the new sizes recorded, and the hardware restarted if it
 * was running.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* Bounds check: TX must leave room for a maximally-fragmented skb
	 * (3x on chips needing the TSO bug workaround).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues share the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12150
12151 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12152 {
12153         struct tg3 *tp = netdev_priv(dev);
12154
12155         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12156
12157         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12158                 epause->rx_pause = 1;
12159         else
12160                 epause->rx_pause = 0;
12161
12162         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12163                 epause->tx_pause = 1;
12164         else
12165                 epause->tx_pause = 0;
12166 }
12167
/* ethtool ->set_pauseparam(): configure flow control (pause) settings.
 * Handles both the phylib-managed path (translate to advertisement bits
 * and renegotiate) and the legacy in-driver PHY path (record flags and
 * restart the hardware).  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause requires Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into 802.3 advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the new
			 * advertisement for when it connects.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12274
12275 static int tg3_get_sset_count(struct net_device *dev, int sset)
12276 {
12277         switch (sset) {
12278         case ETH_SS_TEST:
12279                 return TG3_NUM_TEST;
12280         case ETH_SS_STATS:
12281                 return TG3_NUM_STATS;
12282         default:
12283                 return -EOPNOTSUPP;
12284         }
12285 }
12286
12287 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12288                          u32 *rules __always_unused)
12289 {
12290         struct tg3 *tp = netdev_priv(dev);
12291
12292         if (!tg3_flag(tp, SUPPORT_MSIX))
12293                 return -EOPNOTSUPP;
12294
12295         switch (info->cmd) {
12296         case ETHTOOL_GRXRINGS:
12297                 if (netif_running(tp->dev))
12298                         info->data = tp->rxq_cnt;
12299                 else {
12300                         info->data = num_online_cpus();
12301                         if (info->data > TG3_RSS_MAX_NUM_QS)
12302                                 info->data = TG3_RSS_MAX_NUM_QS;
12303                 }
12304
12305                 /* The first interrupt vector only
12306                  * handles link interrupts.
12307                  */
12308                 info->data -= 1;
12309                 return 0;
12310
12311         default:
12312                 return -EOPNOTSUPP;
12313         }
12314 }
12315
12316 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12317 {
12318         u32 size = 0;
12319         struct tg3 *tp = netdev_priv(dev);
12320
12321         if (tg3_flag(tp, SUPPORT_MSIX))
12322                 size = TG3_RSS_INDIR_TBL_SIZE;
12323
12324         return size;
12325 }
12326
12327 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12328 {
12329         struct tg3 *tp = netdev_priv(dev);
12330         int i;
12331
12332         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12333                 indir[i] = tp->rss_ind_tbl[i];
12334
12335         return 0;
12336 }
12337
12338 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12339 {
12340         struct tg3 *tp = netdev_priv(dev);
12341         size_t i;
12342
12343         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12344                 tp->rss_ind_tbl[i] = indir[i];
12345
12346         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12347                 return 0;
12348
12349         /* It is legal to write the indirection
12350          * table while the device is running.
12351          */
12352         tg3_full_lock(tp, 0);
12353         tg3_rss_write_indir_tbl(tp);
12354         tg3_full_unlock(tp);
12355
12356         return 0;
12357 }
12358
12359 static void tg3_get_channels(struct net_device *dev,
12360                              struct ethtool_channels *channel)
12361 {
12362         struct tg3 *tp = netdev_priv(dev);
12363         u32 deflt_qs = netif_get_num_default_rss_queues();
12364
12365         channel->max_rx = tp->rxq_max;
12366         channel->max_tx = tp->txq_max;
12367
12368         if (netif_running(dev)) {
12369                 channel->rx_count = tp->rxq_cnt;
12370                 channel->tx_count = tp->txq_cnt;
12371         } else {
12372                 if (tp->rxq_req)
12373                         channel->rx_count = tp->rxq_req;
12374                 else
12375                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12376
12377                 if (tp->txq_req)
12378                         channel->tx_count = tp->txq_req;
12379                 else
12380                         channel->tx_count = min(deflt_qs, tp->txq_max);
12381         }
12382 }
12383
12384 static int tg3_set_channels(struct net_device *dev,
12385                             struct ethtool_channels *channel)
12386 {
12387         struct tg3 *tp = netdev_priv(dev);
12388
12389         if (!tg3_flag(tp, SUPPORT_MSIX))
12390                 return -EOPNOTSUPP;
12391
12392         if (channel->rx_count > tp->rxq_max ||
12393             channel->tx_count > tp->txq_max)
12394                 return -EINVAL;
12395
12396         tp->rxq_req = channel->rx_count;
12397         tp->txq_req = channel->tx_count;
12398
12399         if (!netif_running(dev))
12400                 return 0;
12401
12402         tg3_stop(tp);
12403
12404         tg3_carrier_off(tp);
12405
12406         tg3_start(tp, true, false, false);
12407
12408         return 0;
12409 }
12410
12411 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12412 {
12413         switch (stringset) {
12414         case ETH_SS_STATS:
12415                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12416                 break;
12417         case ETH_SS_TEST:
12418                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12419                 break;
12420         default:
12421                 WARN_ON(1);     /* we need a WARN() */
12422                 break;
12423         }
12424 }
12425
12426 static int tg3_set_phys_id(struct net_device *dev,
12427                             enum ethtool_phys_id_state state)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430
12431         if (!netif_running(tp->dev))
12432                 return -EAGAIN;
12433
12434         switch (state) {
12435         case ETHTOOL_ID_ACTIVE:
12436                 return 1;       /* cycle on/off once per second */
12437
12438         case ETHTOOL_ID_ON:
12439                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12440                      LED_CTRL_1000MBPS_ON |
12441                      LED_CTRL_100MBPS_ON |
12442                      LED_CTRL_10MBPS_ON |
12443                      LED_CTRL_TRAFFIC_OVERRIDE |
12444                      LED_CTRL_TRAFFIC_BLINK |
12445                      LED_CTRL_TRAFFIC_LED);
12446                 break;
12447
12448         case ETHTOOL_ID_OFF:
12449                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12450                      LED_CTRL_TRAFFIC_OVERRIDE);
12451                 break;
12452
12453         case ETHTOOL_ID_INACTIVE:
12454                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12455                 break;
12456         }
12457
12458         return 0;
12459 }
12460
12461 static void tg3_get_ethtool_stats(struct net_device *dev,
12462                                    struct ethtool_stats *estats, u64 *tmp_stats)
12463 {
12464         struct tg3 *tp = netdev_priv(dev);
12465
12466         if (tp->hw_stats)
12467                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12468         else
12469                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12470 }
12471
/* Read the VPD (Vital Product Data) block, either from NVRAM or via the
 * PCI VPD capability for devices without NVRAM.  Returns a kmalloc'd
 * buffer (caller must kfree) with *vpdlen set to its length, or NULL on
 * failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Found one: length is in words; the address is
			 * stored in the following directory word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD entry: fall back to the fixed VPD region. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No NVRAM image: read through the PCI VPD capability,
		 * tolerating up to 3 timeouts/interruptions.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12547
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* NVRAM self-test: read the image and verify its checksums.  Supports
 * the standard EEPROM layout (CRC + manufacturing block + VPD checksum),
 * the selfboot firmware formats (byte-sum) and the hardware selfboot
 * format (per-byte parity).  Returns 0 on pass, negative errno on fail.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much of the image the checksum covers, based on
	 * the magic number / format revision.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole region to be checked into memory. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Selfboot firmware: bytewise sum over the image must be
		 * zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Hardware selfboot: parity bits are interleaved with the
		 * data bytes at fixed offsets; each data byte must have
		 * odd parity when combined with its parity bit.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section checksum, if any. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			/* The RV keyword's byte-sum over the preceding
			 * bytes (inclusive) must be zero.
			 */
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12736
12737 #define TG3_SERDES_TIMEOUT_SEC  2
12738 #define TG3_COPPER_TIMEOUT_SEC  6
12739
12740 static int tg3_test_link(struct tg3 *tp)
12741 {
12742         int i, max;
12743
12744         if (!netif_running(tp->dev))
12745                 return -ENODEV;
12746
12747         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12748                 max = TG3_SERDES_TIMEOUT_SEC;
12749         else
12750                 max = TG3_COPPER_TIMEOUT_SEC;
12751
12752         for (i = 0; i < max; i++) {
12753                 if (tp->link_up)
12754                         return 0;
12755
12756                 if (msleep_interruptible(1000))
12757                         break;
12758         }
12759
12760         return -EIO;
12761 }
12762
/* Register read/write self-test.  Only the commonly used registers are
 * covered.  For each table entry the test writes all-zeros and then
 * all-ones, checking that read-only bits keep their saved value and that
 * read/write bits take exactly what was written.  The original register
 * contents are restored before moving on.  Returns 0 on success or -EIO
 * on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		/* Applicability flags: which ASIC families each entry
		 * is valid for (or must be skipped on).
		 */
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register's original contents. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12983
12984 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12985 {
12986         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12987         int i;
12988         u32 j;
12989
12990         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12991                 for (j = 0; j < len; j += 4) {
12992                         u32 val;
12993
12994                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12995                         tg3_read_mem(tp, offset + j, &val);
12996                         if (val != test_pattern[i])
12997                                 return -EIO;
12998                 }
12999         }
13000         return 0;
13001 }
13002
13003 static int tg3_test_memory(struct tg3 *tp)
13004 {
13005         static struct mem_entry {
13006                 u32 offset;
13007                 u32 len;
13008         } mem_tbl_570x[] = {
13009                 { 0x00000000, 0x00b50},
13010                 { 0x00002000, 0x1c000},
13011                 { 0xffffffff, 0x00000}
13012         }, mem_tbl_5705[] = {
13013                 { 0x00000100, 0x0000c},
13014                 { 0x00000200, 0x00008},
13015                 { 0x00004000, 0x00800},
13016                 { 0x00006000, 0x01000},
13017                 { 0x00008000, 0x02000},
13018                 { 0x00010000, 0x0e000},
13019                 { 0xffffffff, 0x00000}
13020         }, mem_tbl_5755[] = {
13021                 { 0x00000200, 0x00008},
13022                 { 0x00004000, 0x00800},
13023                 { 0x00006000, 0x00800},
13024                 { 0x00008000, 0x02000},
13025                 { 0x00010000, 0x0c000},
13026                 { 0xffffffff, 0x00000}
13027         }, mem_tbl_5906[] = {
13028                 { 0x00000200, 0x00008},
13029                 { 0x00004000, 0x00400},
13030                 { 0x00006000, 0x00400},
13031                 { 0x00008000, 0x01000},
13032                 { 0x00010000, 0x01000},
13033                 { 0xffffffff, 0x00000}
13034         }, mem_tbl_5717[] = {
13035                 { 0x00000200, 0x00008},
13036                 { 0x00010000, 0x0a000},
13037                 { 0x00020000, 0x13c00},
13038                 { 0xffffffff, 0x00000}
13039         }, mem_tbl_57765[] = {
13040                 { 0x00000200, 0x00008},
13041                 { 0x00004000, 0x00800},
13042                 { 0x00006000, 0x09800},
13043                 { 0x00010000, 0x0a000},
13044                 { 0xffffffff, 0x00000}
13045         };
13046         struct mem_entry *mem_tbl;
13047         int err = 0;
13048         int i;
13049
13050         if (tg3_flag(tp, 5717_PLUS))
13051                 mem_tbl = mem_tbl_5717;
13052         else if (tg3_flag(tp, 57765_CLASS) ||
13053                  tg3_asic_rev(tp) == ASIC_REV_5762)
13054                 mem_tbl = mem_tbl_57765;
13055         else if (tg3_flag(tp, 5755_PLUS))
13056                 mem_tbl = mem_tbl_5755;
13057         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13058                 mem_tbl = mem_tbl_5906;
13059         else if (tg3_flag(tp, 5705_PLUS))
13060                 mem_tbl = mem_tbl_5705;
13061         else
13062                 mem_tbl = mem_tbl_570x;
13063
13064         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13065                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13066                 if (err)
13067                         break;
13068         }
13069
13070         return err;
13071 }
13072
/* Parameters of the canned TSO loopback frame. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned ethertype + IPv4 + TCP header used as the start of the TSO
 * loopback test frame (everything after the two MAC addresses).  The
 * IP total-length field is patched at run time by tg3_run_loopback(),
 * and the TCP checksum is zeroed there for HW-TSO devices.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: ver/ihl, tos, tot_len (patched later) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, flags (DF) + frag offset */
0x40, 0x06, 0x00, 0x00,		/* IP: ttl 64, proto TCP, checksum 0 */
0x0a, 0x00, 0x00, 0x01,		/* IP: src 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: dst 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: src port, dst port */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset 8 words, flags, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp value */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp echo reply */
};
13095
/* Send one frame through the currently configured loopback path and
 * verify it comes back intact.
 *
 * @pktsz:        total frame length to transmit (excluding FCS)
 * @tso_loopback: if true, transmit the canned TSO super-frame and expect
 *                the hardware to segment it into multiple packets
 *
 * Returns 0 when every expected packet is received with the expected
 * payload bytes; -ENOMEM or -EIO otherwise.  Called from
 * tg3_test_loopback() under tg3_full_lock() with the hardware freshly
 * reset; the loopback mode itself must already have been enabled by the
 * caller (tg3_mac_loopback()/tg3_phy_lpbk_set()).
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Default to queue 0; with RSS/TSS enabled the test traffic is
	 * steered to queue 1 instead (see the indirection-table reroute
	 * in tg3_test_loopback()).
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address so the loopback frame passes
	 * the RX filters; source MAC left as zeros.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Lay the canned IP+TCP header right after the two MAC
		 * addresses (tg3_tso_header begins with the ethertype).
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Expect one received segment per MSS worth of payload. */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO computes the TCP checksum itself; clear
			 * the canned one so it starts from zero.
			 */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * format this TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a predictable counting pattern so the
	 * receive side can verify every byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the RX producer index before transmitting. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if the frame was not consumed by the TX engine ... */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	/* ... or not all expected packets arrived. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk the received descriptors and verify each packet. */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* A frame that fits a standard buffer must land on
			 * the standard ring, larger ones on the jumbo ring.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* 'val' carries the expected pattern byte across segment
		 * boundaries in the TSO case.
		 */
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
13303
13304 #define TG3_STD_LOOPBACK_FAILED         1
13305 #define TG3_JMB_LOOPBACK_FAILED         2
13306 #define TG3_TSO_LOOPBACK_FAILED         4
13307 #define TG3_LOOPBACK_FAILED \
13308         (TG3_STD_LOOPBACK_FAILED | \
13309          TG3_JMB_LOOPBACK_FAILED | \
13310          TG3_TSO_LOOPBACK_FAILED)
13311
/* Run the loopback self-tests (MAC loopback, internal PHY loopback, and
 * optionally external PHY loopback), each with standard, TSO, and jumbo
 * frame sizes as supported.  Failure bits are OR'd into
 * data[TG3_{MAC,PHY,EXT}_LOOPB_TEST].  Returns 0 if everything passed,
 * -EIO otherwise.  Called with the device halted; performs its own
 * tg3_reset_hw().
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily mask off EEE capability; restored at 'done'. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback: only when the PHY is driven by this driver,
	 * not by phylib, and is not a pure serdes device.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13426
/* ethtool self-test entry point (ETHTOOL_TEST).  Runs the nvram and link
 * tests always, and - when ETH_TEST_FL_OFFLINE is requested - also the
 * register, memory, loopback, and interrupt tests.  Per-test pass/fail
 * results are written to @data (1 = failed), and overall failure is
 * flagged in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Bring the chip out of low-power mode before testing. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the device before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* tg3_test_loopback() fills the per-loopback slots of
		 * data[] itself.
		 */
		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Drop back into low-power mode if that is where we started. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13515
/* SIOCSHWTSTAMP handler: configure hardware TX/RX packet timestamping.
 *
 * Copies a struct hwtstamp_config from userspace, validates it, programs
 * the TX timestamp enable flag and the RX PTP packet filter, and writes
 * the filter to the chip when the interface is running.  The (unmodified)
 * config is copied back to userspace on success.
 *
 * Returns 0 on success, -EINVAL for unsupported devices/flags, -ERANGE
 * for unsupported tx_type/rx_filter values, or -EFAULT on a bad user
 * pointer.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* No flag bits are supported; reject reserved bits. */
	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	/* Map the requested RX filter onto the chip's PTP control bits;
	 * the result is cached in tp->rxptpctl for later reprogramming.
	 */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* Only touch the chip while it is initialized, and only when a
	 * non-zero filter was requested.
	 */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13605
13606 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13607 {
13608         struct mii_ioctl_data *data = if_mii(ifr);
13609         struct tg3 *tp = netdev_priv(dev);
13610         int err;
13611
13612         if (tg3_flag(tp, USE_PHYLIB)) {
13613                 struct phy_device *phydev;
13614                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13615                         return -EAGAIN;
13616                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13617                 return phy_mii_ioctl(phydev, ifr, cmd);
13618         }
13619
13620         switch (cmd) {
13621         case SIOCGMIIPHY:
13622                 data->phy_id = tp->phy_addr;
13623
13624                 /* fallthru */
13625         case SIOCGMIIREG: {
13626                 u32 mii_regval;
13627
13628                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13629                         break;                  /* We have no PHY */
13630
13631                 if (!netif_running(dev))
13632                         return -EAGAIN;
13633
13634                 spin_lock_bh(&tp->lock);
13635                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13636                                     data->reg_num & 0x1f, &mii_regval);
13637                 spin_unlock_bh(&tp->lock);
13638
13639                 data->val_out = mii_regval;
13640
13641                 return err;
13642         }
13643
13644         case SIOCSMIIREG:
13645                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13646                         break;                  /* We have no PHY */
13647
13648                 if (!netif_running(dev))
13649                         return -EAGAIN;
13650
13651                 spin_lock_bh(&tp->lock);
13652                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13653                                      data->reg_num & 0x1f, data->val_in);
13654                 spin_unlock_bh(&tp->lock);
13655
13656                 return err;
13657
13658         case SIOCSHWTSTAMP:
13659                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13660
13661         default:
13662                 /* do nothing */
13663                 break;
13664         }
13665         return -EOPNOTSUPP;
13666 }
13667
13668 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13669 {
13670         struct tg3 *tp = netdev_priv(dev);
13671
13672         memcpy(ec, &tp->coal, sizeof(*ec));
13673         return 0;
13674 }
13675
13676 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13677 {
13678         struct tg3 *tp = netdev_priv(dev);
13679         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13680         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13681
13682         if (!tg3_flag(tp, 5705_PLUS)) {
13683                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13684                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13685                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13686                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13687         }
13688
13689         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13690             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13691             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13692             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13693             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13694             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13695             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13696             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13697             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13698             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13699                 return -EINVAL;
13700
13701         /* No rx interrupts will be generated if both are zero */
13702         if ((ec->rx_coalesce_usecs == 0) &&
13703             (ec->rx_max_coalesced_frames == 0))
13704                 return -EINVAL;
13705
13706         /* No tx interrupts will be generated if both are zero */
13707         if ((ec->tx_coalesce_usecs == 0) &&
13708             (ec->tx_max_coalesced_frames == 0))
13709                 return -EINVAL;
13710
13711         /* Only copy relevant parameters, ignore all others. */
13712         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13713         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13714         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13715         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13716         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13717         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13718         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13719         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13720         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13721
13722         if (netif_running(dev)) {
13723                 tg3_full_lock(tp, 0);
13724                 __tg3_set_coalesce(tp, &tp->coal);
13725                 tg3_full_unlock(tp);
13726         }
13727         return 0;
13728 }
13729
13730 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13731 {
13732         struct tg3 *tp = netdev_priv(dev);
13733
13734         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13735                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13736                 return -EOPNOTSUPP;
13737         }
13738
13739         if (edata->advertised != tp->eee.advertised) {
13740                 netdev_warn(tp->dev,
13741                             "Direct manipulation of EEE advertisement is not supported\n");
13742                 return -EINVAL;
13743         }
13744
13745         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13746                 netdev_warn(tp->dev,
13747                             "Maximal Tx Lpi timer supported is %#x(u)\n",
13748                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13749                 return -EINVAL;
13750         }
13751
13752         tp->eee = *edata;
13753
13754         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13755         tg3_warn_mgmt_link_flap(tp);
13756
13757         if (netif_running(tp->dev)) {
13758                 tg3_full_lock(tp, 0);
13759                 tg3_setup_eee(tp);
13760                 tg3_phy_reset(tp);
13761                 tg3_full_unlock(tp);
13762         }
13763
13764         return 0;
13765 }
13766
13767 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13768 {
13769         struct tg3 *tp = netdev_priv(dev);
13770
13771         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13772                 netdev_warn(tp->dev,
13773                             "Board does not support EEE!\n");
13774                 return -EOPNOTSUPP;
13775         }
13776
13777         *edata = tp->eee;
13778         return 0;
13779 }
13780
/* ethtool entry points; installed as dev->ethtool_ops at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
13817
13818 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13819                                                 struct rtnl_link_stats64 *stats)
13820 {
13821         struct tg3 *tp = netdev_priv(dev);
13822
13823         spin_lock_bh(&tp->lock);
13824         if (!tp->hw_stats) {
13825                 spin_unlock_bh(&tp->lock);
13826                 return &tp->net_stats_prev;
13827         }
13828
13829         tg3_get_nstats(tp, stats);
13830         spin_unlock_bh(&tp->lock);
13831
13832         return stats;
13833 }
13834
/* ndo_set_rx_mode handler: reprogram RX filtering under the full lock.
 * A stopped interface is left alone; the mode is applied on open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13846
13847 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13848                                int new_mtu)
13849 {
13850         dev->mtu = new_mtu;
13851
13852         if (new_mtu > ETH_DATA_LEN) {
13853                 if (tg3_flag(tp, 5780_CLASS)) {
13854                         netdev_update_features(dev);
13855                         tg3_flag_clear(tp, TSO_CAPABLE);
13856                 } else {
13857                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13858                 }
13859         } else {
13860                 if (tg3_flag(tp, 5780_CLASS)) {
13861                         tg3_flag_set(tp, TSO_CAPABLE);
13862                         netdev_update_features(dev);
13863                 }
13864                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13865         }
13866 }
13867
/* ndo_change_mtu handler.  When the device is down the new MTU is only
 * recorded and takes effect at the next open.  Otherwise the chip is
 * fully stopped, reconfigured for the new frame size, and restarted;
 * the phy_stop/netif_stop/lock/halt ordering below mirrors the other
 * reconfiguration paths and must not be changed casually.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart PHY processing only after a successful hw restart. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13913
/* Network stack entry points; installed as dev->netdev_ops at probe. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13931
13932 static void tg3_get_eeprom_size(struct tg3 *tp)
13933 {
13934         u32 cursize, val, magic;
13935
13936         tp->nvram_size = EEPROM_CHIP_SIZE;
13937
13938         if (tg3_nvram_read(tp, 0, &magic) != 0)
13939                 return;
13940
13941         if ((magic != TG3_EEPROM_MAGIC) &&
13942             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13943             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13944                 return;
13945
13946         /*
13947          * Size the chip by reading offsets at increasing powers of two.
13948          * When we encounter our validation signature, we know the addressing
13949          * has wrapped around, and thus have our chip size.
13950          */
13951         cursize = 0x10;
13952
13953         while (cursize < tp->nvram_size) {
13954                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13955                         return;
13956
13957                 if (val == magic)
13958                         break;
13959
13960                 cursize <<= 1;
13961         }
13962
13963         tp->nvram_size = cursize;
13964 }
13965
13966 static void tg3_get_nvram_size(struct tg3 *tp)
13967 {
13968         u32 val;
13969
13970         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13971                 return;
13972
13973         /* Selfboot format */
13974         if (val != TG3_EEPROM_MAGIC) {
13975                 tg3_get_eeprom_size(tp);
13976                 return;
13977         }
13978
13979         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13980                 if (val != 0) {
13981                         /* This is confusing.  We want to operate on the
13982                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13983                          * call will read from NVRAM and byteswap the data
13984                          * according to the byteswapping settings for all
13985                          * other register accesses.  This ensures the data we
13986                          * want will always reside in the lower 16-bits.
13987                          * However, the data in NVRAM is in LE format, which
13988                          * means the data from the NVRAM read will always be
13989                          * opposite the endianness of the CPU.  The 16-bit
13990                          * byteswap then brings the data to CPU endianness.
13991                          */
13992                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13993                         return;
13994                 }
13995         }
13996         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13997 }
13998
/* Decode NVRAM_CFG1 for 5750 / 5780-class devices and record the NVRAM
 * vendor (JEDEC id), page size and buffering mode.  Older parts default
 * to a buffered Atmel AT45DB0X1B.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: force real NVRAM access cycles. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14049
14050 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14051 {
14052         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14053         case FLASH_5752PAGE_SIZE_256:
14054                 tp->nvram_pagesize = 256;
14055                 break;
14056         case FLASH_5752PAGE_SIZE_512:
14057                 tp->nvram_pagesize = 512;
14058                 break;
14059         case FLASH_5752PAGE_SIZE_1K:
14060                 tp->nvram_pagesize = 1024;
14061                 break;
14062         case FLASH_5752PAGE_SIZE_2K:
14063                 tp->nvram_pagesize = 2048;
14064                 break;
14065         case FLASH_5752PAGE_SIZE_4K:
14066                 tp->nvram_pagesize = 4096;
14067                 break;
14068         case FLASH_5752PAGE_SIZE_264:
14069                 tp->nvram_pagesize = 264;
14070                 break;
14071         case FLASH_5752PAGE_SIZE_528:
14072                 tp->nvram_pagesize = 528;
14073                 break;
14074         }
14075 }
14076
/* Decode NVRAM_CFG1 for 5752 devices: record vendor/buffering, then
 * either derive the flash page size or fall back to EEPROM settings.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14117
/* Decode NVRAM_CFG1 for 5755 devices.  Besides vendor/page-size, the
 * total size is derived from the exact part, with a smaller usable size
 * when TPM protection is active.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14173
/* Decode NVRAM_CFG1 for 5787 devices and record vendor, buffering and
 * page size; EEPROM parts also get compat-bypass cleared so real NVRAM
 * cycles are used.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14211
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM protection is active
 * the usable size comes from the lockout register; otherwise it is
 * derived from the exact flash part.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14286
/* 5906 NVRAM setup: a buffered Atmel AT24C512-class serial EEPROM with
 * no strapping options to decode, so the parameters are fixed.
 */
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
14293
/* Decode the NVRAM_CFG1 vendor/device strapping for 57780-class (and
 * 57765-class, see tg3_nvram_init) devices and fill in the NVRAM type,
 * flags, page size and -- where the strap implies it -- total size.
 * Unrecognized straps mark the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                /* Serial EEPROM part: fixed page size, no flash flag. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* Clear the compatibility-bypass bit for EEPROM access. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                /* Atmel AT45DB-family flash parts. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size follows directly from the specific strap. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                /* ST M45PE-family flash parts. */
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Only 264/528-byte page parts use NVRAM address translation. */
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14365
14366
/* Decode the NVRAM_CFG1 vendor/device strapping for 5717/5719 devices
 * and fill in the NVRAM type, flags, page size and size where the strap
 * determines it.  Some straps leave nvram_size at 0 so the size is
 * detected later (see tg3_nvram_init / tg3_get_nvram_size).
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                /* Serial EEPROM part: fixed page size, no flash flag. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* Clear the compatibility-bypass bit for EEPROM access. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                /* Atmel flash parts. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                /* ST M25PE/M45PE-family flash parts. */
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Only 264/528-byte page parts use NVRAM address translation. */
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14444
/* Decode the NVRAM_CFG1 vendor/device strapping for 5720 and 5762
 * devices.  5762 pinstraps that differ from the 5720 encoding are first
 * translated into their 5720 equivalents so a single switch below can
 * handle both ASICs.  On 5762, an NVRAM without a recognizable magic
 * signature is finally demoted to NO_NVRAM.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                /* A zero vendor field on 5762 means no NVRAM present. */
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                /* Map 5762-specific pinstraps onto 5720 encodings. */
                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                /* Serial EEPROM part: no flash flag, page size depends on
                 * the HD/LD strap.
                 */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                /* Clear the compatibility-bypass bit for EEPROM access. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                /* Atmel flash parts. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On 5762 leave nvram_size at 0 so the size is
                         * detected from the device instead.
                         */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                /* ST M25PE/M45PE-family flash parts. */
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* On 5762 leave nvram_size at 0 so the size is
                         * detected from the device instead.
                         */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Only 264/528-byte page parts use NVRAM address translation. */
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                /* No recognizable image signature at word 0 means this
                 * NVRAM cannot be used.
                 */
                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
14591
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Probe the NVRAM/EEPROM attached to the device: reset the EEPROM
 * access state machine, enable serial-EEPROM access, then dispatch to
 * the per-ASIC helper that decodes the strapping and fills in
 * tp->nvram_* and the NVRAM-related flags.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        /* Reset the EEPROM address state machine and program the default
         * clock period.
         */
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                /* Hold the NVRAM arbitration lock across the per-ASIC
                 * probing below.
                 */
                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                /* Per-ASIC helpers set nvram_size when the strapping
                 * implies it; 0 means "detect from contents" below.
                 */
                tp->nvram_size = 0;

                if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
                         tg3_asic_rev(tp) == ASIC_REV_5784 ||
                         tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                         tg3_asic_rev(tp) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
                         tg3_asic_rev(tp) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                /* 5700/5701: plain serial EEPROM, no NVRAM interface. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}
14666
/* One entry of the PCI subsystem ID -> PHY ID mapping table below.
 * phy_id is a TG3_PHY_ID_* value, or 0 for boards with no entry-specific
 * PHY ID (presumably fiber boards -- see the table's zero entries).
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;        /* PCI subsystem vendor/device IDs */
        u32 phy_id;                             /* expected PHY ID, or 0 */
};
14671
/* Known boards and the PHY each carries, keyed by PCI subsystem IDs.
 * Searched linearly by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14735
14736 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14737 {
14738         int i;
14739
14740         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14741                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14742                      tp->pdev->subsystem_vendor) &&
14743                     (subsys_id_to_phy_id[i].subsys_devid ==
14744                      tp->pdev->subsystem_device))
14745                         return &subsys_id_to_phy_id[i];
14746         }
14747         return NULL;
14748 }
14749
/* Read the hardware configuration the bootcode left in NIC SRAM (or, on
 * 5906, in the VCPU shadow register) and translate it into driver
 * state: PHY ID, LED mode, write-protect/NIC flags, ASF/APE enables,
 * WOL capability and assorted PHY workaround flags.  Finally sync the
 * PCI core's wakeup state with the WOL flags.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capable by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* 5906 exposes its config via VCPU_CFGSHDW instead of
                 * the NIC SRAM layout used below.
                 */
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        /* Only trust the SRAM config area if the bootcode signed it. */
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                /* CFG_2 only exists for 0 < ver < 0x100 on non-5700/
                 * 5701/5703 parts; leave cfg2 = 0 otherwise.
                 */
                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
                    tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                /* Reassemble the PHY ID from the SRAM's split encoding. */
                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                /* 5750+ parts carry extra (SHASTA) LED mode bits in CFG_2. */
                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is
                         * read on some older 5700/5701 bootcode.
                         */
                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                }

                /* Board-specific LED overrides. */
                if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
                     tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        /* Specific Arima boards are exempt from write
                         * protection despite the WP bit.
                         */
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                /* Serdes boards can opt out of WOL support. */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                            !tg3_flag(tp, 57765_PLUS) &&
                            (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                        if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
                        if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        /* Propagate the final WOL capability/enable to the PM core. */
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}
14951
/* Read one 32-bit word from the APE OTP region.  @offset is scaled by 8
 * to form the device OTP address.  The NVRAM lock is held around the
 * whole command sequence.  Returns 0 with the word in *val on success,
 * the tg3_nvram_lock() error if the lock cannot be taken, or -EBUSY if
 * the OTP read command does not complete within ~1 ms.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        /* Program the address, kick off a read, then poll for done. */
        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        /* Quiesce the OTP controller before dropping the lock. */
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}
14984
14985 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14986 {
14987         int i;
14988         u32 val;
14989
14990         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14991         tw32(OTP_CTRL, cmd);
14992
14993         /* Wait for up to 1 ms for command to execute. */
14994         for (i = 0; i < 100; i++) {
14995                 val = tr32(OTP_STATUS);
14996                 if (val & OTP_STATUS_CMD_DONE)
14997                         break;
14998                 udelay(10);
14999         }
15000
15001         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15002 }
15003
15004 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15005  * configuration is a 32-bit value that straddles the alignment boundary.
15006  * We do two 32-bit reads and then shift and merge the results.
15007  */
15008 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15009 {
15010         u32 bhalf_otp, thalf_otp;
15011
15012         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15013
15014         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15015                 return 0;
15016
15017         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15018
15019         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15020                 return 0;
15021
15022         thalf_otp = tr32(OTP_READ_DATA);
15023
15024         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15025
15026         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15027                 return 0;
15028
15029         bhalf_otp = tr32(OTP_READ_DATA);
15030
15031         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15032 }
15033
15034 static void tg3_phy_init_link_config(struct tg3 *tp)
15035 {
15036         u32 adv = ADVERTISED_Autoneg;
15037
15038         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15039                 adv |= ADVERTISED_1000baseT_Half |
15040                        ADVERTISED_1000baseT_Full;
15041
15042         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15043                 adv |= ADVERTISED_100baseT_Half |
15044                        ADVERTISED_100baseT_Full |
15045                        ADVERTISED_10baseT_Half |
15046                        ADVERTISED_10baseT_Full |
15047                        ADVERTISED_TP;
15048         else
15049                 adv |= ADVERTISED_FIBRE;
15050
15051         tp->link_config.advertising = adv;
15052         tp->link_config.speed = SPEED_UNKNOWN;
15053         tp->link_config.duplex = DUPLEX_UNKNOWN;
15054         tp->link_config.autoneg = AUTONEG_ENABLE;
15055         tp->link_config.active_speed = SPEED_UNKNOWN;
15056         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15057
15058         tp->old_link = -1;
15059 }
15060
/* Identify the PHY attached to this device and bring it to a sane
 * initial state: pick the per-function APE lock, read (or look up) the
 * PHY ID, flag EEE capability on supporting revisions, install the
 * default link configuration, and restart autonegotiation when nothing
 * else (ASF/APE firmware, phylib) manages the PHY.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function uses its own APE lock for PHY accesses. */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Plain gigabit copper with no ASF: drop the powered-down-link
	 * flags that may have been set from NVRAM config.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Fold PHYSID1/PHYSID2 into the driver's internal
		 * 32-bit PHY ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Flag EEE capability on copper PHYs of supporting revisions
	 * (excluding early A0 steppings) and preload default EEE state.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* When no firmware manages the PHY, reset it and restart
	 * autonegotiation -- unless the link is already up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* Read BMSR twice: link status is latched-low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the second call is deliberate here --
		 * presumably the first programming pass does not always
		 * take on the 5401; confirm against PHY errata.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15215
/* Extract the board part number (and, for one OEM, a firmware version
 * string) from the device's PCI VPD into tp->board_part_number and
 * tp->fw_ver.  When no usable VPD is present, fall back to a hardcoded
 * name derived from the PCI device ID.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	/* vpd_data is allocated by the helper and freed below. */
	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Manufacturer-ID keyword.  Only when it matches "1028"
	 * (presumably Dell's PCI vendor ID in ASCII -- confirm) is the
	 * vendor-specific V0 keyword used as a firmware version.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Truncate so the string plus NUL fits in fw_ver. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Part-number keyword: the board part number proper. */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No usable VPD: synthesize a name from the PCI device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15339
15340 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15341 {
15342         u32 val;
15343
15344         if (tg3_nvram_read(tp, offset, &val) ||
15345             (val & 0xfc000000) != 0x0c000000 ||
15346             tg3_nvram_read(tp, offset + 4, &val) ||
15347             val != 0)
15348                 return 0;
15349
15350         return 1;
15351 }
15352
/* Read the bootcode version from NVRAM and append it to tp->fw_ver.
 * Newer image layouts carry a 16-byte version string; older ones
 * encode major/minor in a pointer-table word.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc points at the bootcode image; word 0x4 is used as
	 * its base (it is subtracted from image-relative offsets below).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Signature 0x0c...... followed by a zero word marks the newer
	 * layout with an embedded version string (see tg3_fw_img_is_valid).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image base 'start'. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15404
15405 static void tg3_read_hwsb_ver(struct tg3 *tp)
15406 {
15407         u32 val, major, minor;
15408
15409         /* Use native endian representation */
15410         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15411                 return;
15412
15413         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15414                 TG3_NVM_HWSB_CFG1_MAJSFT;
15415         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15416                 TG3_NVM_HWSB_CFG1_MINSFT;
15417
15418         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15419 }
15420
/* Decode the version of a firmware self-boot NVRAM image (format 1)
 * and append it to tp->fw_ver as "sb vM.mm" plus an optional build
 * letter.  @val is the NVRAM magic word already read by the caller.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The EDH (version) word moved between image revisions. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave the version at just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor prints as %02d, and build maps to a
	 * single letter 'a'..'z' below (hence the 26 cap).
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Append the build letter if there is one and room remains. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15475
15476 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15477 {
15478         u32 val, offset, start;
15479         int i, vlen;
15480
15481         for (offset = TG3_NVM_DIR_START;
15482              offset < TG3_NVM_DIR_END;
15483              offset += TG3_NVM_DIRENT_SIZE) {
15484                 if (tg3_nvram_read(tp, offset, &val))
15485                         return;
15486
15487                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15488                         break;
15489         }
15490
15491         if (offset == TG3_NVM_DIR_END)
15492                 return;
15493
15494         if (!tg3_flag(tp, 5705_PLUS))
15495                 start = 0x08000000;
15496         else if (tg3_nvram_read(tp, offset - 4, &start))
15497                 return;
15498
15499         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15500             !tg3_fw_img_is_valid(tp, offset) ||
15501             tg3_nvram_read(tp, offset + 8, &val))
15502                 return;
15503
15504         offset += val - start;
15505
15506         vlen = strlen(tp->fw_ver);
15507
15508         tp->fw_ver[vlen++] = ',';
15509         tp->fw_ver[vlen++] = ' ';
15510
15511         for (i = 0; i < 4; i++) {
15512                 __be32 v;
15513                 if (tg3_nvram_read_be32(tp, offset, &v))
15514                         return;
15515
15516                 offset += sizeof(v);
15517
15518                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15519                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15520                         break;
15521                 }
15522
15523                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15524                 vlen += sizeof(v);
15525         }
15526 }
15527
15528 static void tg3_probe_ncsi(struct tg3 *tp)
15529 {
15530         u32 apedata;
15531
15532         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15533         if (apedata != APE_SEG_SIG_MAGIC)
15534                 return;
15535
15536         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15537         if (!(apedata & APE_FW_STATUS_READY))
15538                 return;
15539
15540         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15541                 tg3_flag_set(tp, APE_HAS_NCSI);
15542 }
15543
15544 static void tg3_read_dash_ver(struct tg3 *tp)
15545 {
15546         int vlen;
15547         u32 apedata;
15548         char *fwtype;
15549
15550         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15551
15552         if (tg3_flag(tp, APE_HAS_NCSI))
15553                 fwtype = "NCSI";
15554         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15555                 fwtype = "SMASH";
15556         else
15557                 fwtype = "DASH";
15558
15559         vlen = strlen(tp->fw_ver);
15560
15561         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15562                  fwtype,
15563                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15564                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15565                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15566                  (apedata & APE_FW_VERSION_BLDMSK));
15567 }
15568
15569 static void tg3_read_otp_ver(struct tg3 *tp)
15570 {
15571         u32 val, val2;
15572
15573         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15574                 return;
15575
15576         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15577             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15578             TG3_OTP_MAGIC0_VALID(val)) {
15579                 u64 val64 = (u64) val << 32 | val2;
15580                 u32 ver = 0;
15581                 int i, vlen;
15582
15583                 for (i = 0; i < 7; i++) {
15584                         if ((val64 & 0xff) == 0)
15585                                 break;
15586                         ver = val64 & 0xff;
15587                         val64 >>= 8;
15588                 }
15589                 vlen = strlen(tp->fw_ver);
15590                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15591         }
15592 }
15593
/* Assemble tp->fw_ver from every source available: the NVRAM bootcode
 * or self-boot version, plus ASF/APE management firmware versions.
 * tp->fw_ver may already contain a VPD-derived string on entry; in
 * that case the management version is not appended again.
 */
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* Non-empty buffer => a VPD version string is already present. */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Dispatch on the NVRAM magic word: full EEPROM image,
	 * firmware self-boot, or hardware self-boot layout.
	 */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination whatever the helpers appended. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
15630
15631 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15632 {
15633         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15634                 return TG3_RX_RET_MAX_SIZE_5717;
15635         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15636                 return TG3_RX_RET_MAX_SIZE_5700;
15637         else
15638                 return TG3_RX_RET_MAX_SIZE_5705;
15639 }
15640
15641 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15642         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15643         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15644         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15645         { },
15646 };
15647
15648 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15649 {
15650         struct pci_dev *peer;
15651         unsigned int func, devnr = tp->pdev->devfn & ~7;
15652
15653         for (func = 0; func < 8; func++) {
15654                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15655                 if (peer && peer != tp->pdev)
15656                         break;
15657                 pci_dev_put(peer);
15658         }
15659         /* 5704 can be configured in single-port mode, set peer to
15660          * tp->pdev in that case.
15661          */
15662         if (!peer) {
15663                 peer = tp->pdev;
15664                 return peer;
15665         }
15666
15667         /*
15668          * We don't need to keep the refcount elevated; there's no way
15669          * to remove one half of this device without removing the other
15670          */
15671         pci_dev_put(peer);
15672
15673         return peer;
15674 }
15675
/* Decode tp->pci_chip_rev_id from MISC_HOST_CTRL (or, on newer parts,
 * from a product-ID config register), correct known-bad IDs, and set
 * the chip-family "class" flags that feature checks rely on.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the product-ID register by device generation. */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Treat 5717 C0 like 5720 A0. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	/* The class flags below cascade: each later flag's condition
	 * folds in flags set earlier, so the order must be preserved.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15760
15761 static bool tg3_10_100_only_device(struct tg3 *tp,
15762                                    const struct pci_device_id *ent)
15763 {
15764         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15765
15766         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15767              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15768             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15769                 return true;
15770
15771         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15772                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15773                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15774                                 return true;
15775                 } else {
15776                         return true;
15777                 }
15778         }
15779
15780         return false;
15781 }
15782
15783 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15784 {
15785         u32 misc_ctrl_reg;
15786         u32 pci_state_reg, grc_misc_cfg;
15787         u32 val;
15788         u16 pci_cmd;
15789         int err;
15790
15791         /* Force memory write invalidate off.  If we leave it on,
15792          * then on 5700_BX chips we have to enable a workaround.
15793          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15794          * to match the cacheline size.  The Broadcom driver have this
15795          * workaround but turns MWI off all the times so never uses
15796          * it.  This seems to suggest that the workaround is insufficient.
15797          */
15798         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15799         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15800         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15801
15802         /* Important! -- Make sure register accesses are byteswapped
15803          * correctly.  Also, for those chips that require it, make
15804          * sure that indirect register accesses are enabled before
15805          * the first operation.
15806          */
15807         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15808                               &misc_ctrl_reg);
15809         tp->misc_host_ctrl |= (misc_ctrl_reg &
15810                                MISC_HOST_CTRL_CHIPREV);
15811         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15812                                tp->misc_host_ctrl);
15813
15814         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15815
15816         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15817          * we need to disable memory and use config. cycles
15818          * only to access all registers. The 5702/03 chips
15819          * can mistakenly decode the special cycles from the
15820          * ICH chipsets as memory write cycles, causing corruption
15821          * of register and memory space. Only certain ICH bridges
15822          * will drive special cycles with non-zero data during the
15823          * address phase which can fall within the 5703's address
15824          * range. This is not an ICH bug as the PCI spec allows
15825          * non-zero address during special cycles. However, only
15826          * these ICH bridges are known to drive non-zero addresses
15827          * during special cycles.
15828          *
15829          * Since special cycles do not cross PCI bridges, we only
15830          * enable this workaround if the 5703 is on the secondary
15831          * bus of these ICH bridges.
15832          */
15833         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15834             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15835                 static struct tg3_dev_id {
15836                         u32     vendor;
15837                         u32     device;
15838                         u32     rev;
15839                 } ich_chipsets[] = {
15840                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15841                           PCI_ANY_ID },
15842                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15843                           PCI_ANY_ID },
15844                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15845                           0xa },
15846                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15847                           PCI_ANY_ID },
15848                         { },
15849                 };
15850                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15851                 struct pci_dev *bridge = NULL;
15852
15853                 while (pci_id->vendor != 0) {
15854                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15855                                                 bridge);
15856                         if (!bridge) {
15857                                 pci_id++;
15858                                 continue;
15859                         }
15860                         if (pci_id->rev != PCI_ANY_ID) {
15861                                 if (bridge->revision > pci_id->rev)
15862                                         continue;
15863                         }
15864                         if (bridge->subordinate &&
15865                             (bridge->subordinate->number ==
15866                              tp->pdev->bus->number)) {
15867                                 tg3_flag_set(tp, ICH_WORKAROUND);
15868                                 pci_dev_put(bridge);
15869                                 break;
15870                         }
15871                 }
15872         }
15873
15874         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15875                 static struct tg3_dev_id {
15876                         u32     vendor;
15877                         u32     device;
15878                 } bridge_chipsets[] = {
15879                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15880                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15881                         { },
15882                 };
15883                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15884                 struct pci_dev *bridge = NULL;
15885
15886                 while (pci_id->vendor != 0) {
15887                         bridge = pci_get_device(pci_id->vendor,
15888                                                 pci_id->device,
15889                                                 bridge);
15890                         if (!bridge) {
15891                                 pci_id++;
15892                                 continue;
15893                         }
15894                         if (bridge->subordinate &&
15895                             (bridge->subordinate->number <=
15896                              tp->pdev->bus->number) &&
15897                             (bridge->subordinate->busn_res.end >=
15898                              tp->pdev->bus->number)) {
15899                                 tg3_flag_set(tp, 5701_DMA_BUG);
15900                                 pci_dev_put(bridge);
15901                                 break;
15902                         }
15903                 }
15904         }
15905
15906         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15907          * DMA addresses > 40-bit. This bridge may have other additional
15908          * 57xx devices behind it in some 4-port NIC designs for example.
15909          * Any tg3 device found behind the bridge will also need the 40-bit
15910          * DMA workaround.
15911          */
15912         if (tg3_flag(tp, 5780_CLASS)) {
15913                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15914                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15915         } else {
15916                 struct pci_dev *bridge = NULL;
15917
15918                 do {
15919                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15920                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15921                                                 bridge);
15922                         if (bridge && bridge->subordinate &&
15923                             (bridge->subordinate->number <=
15924                              tp->pdev->bus->number) &&
15925                             (bridge->subordinate->busn_res.end >=
15926                              tp->pdev->bus->number)) {
15927                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15928                                 pci_dev_put(bridge);
15929                                 break;
15930                         }
15931                 } while (bridge);
15932         }
15933
15934         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15935             tg3_asic_rev(tp) == ASIC_REV_5714)
15936                 tp->pdev_peer = tg3_find_peer(tp);
15937
15938         /* Determine TSO capabilities */
15939         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15940                 ; /* Do nothing. HW bug. */
15941         else if (tg3_flag(tp, 57765_PLUS))
15942                 tg3_flag_set(tp, HW_TSO_3);
15943         else if (tg3_flag(tp, 5755_PLUS) ||
15944                  tg3_asic_rev(tp) == ASIC_REV_5906)
15945                 tg3_flag_set(tp, HW_TSO_2);
15946         else if (tg3_flag(tp, 5750_PLUS)) {
15947                 tg3_flag_set(tp, HW_TSO_1);
15948                 tg3_flag_set(tp, TSO_BUG);
15949                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15950                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15951                         tg3_flag_clear(tp, TSO_BUG);
15952         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15953                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15954                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15955                 tg3_flag_set(tp, FW_TSO);
15956                 tg3_flag_set(tp, TSO_BUG);
15957                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15958                         tp->fw_needed = FIRMWARE_TG3TSO5;
15959                 else
15960                         tp->fw_needed = FIRMWARE_TG3TSO;
15961         }
15962
15963         /* Selectively allow TSO based on operating conditions */
15964         if (tg3_flag(tp, HW_TSO_1) ||
15965             tg3_flag(tp, HW_TSO_2) ||
15966             tg3_flag(tp, HW_TSO_3) ||
15967             tg3_flag(tp, FW_TSO)) {
15968                 /* For firmware TSO, assume ASF is disabled.
15969                  * We'll disable TSO later if we discover ASF
15970                  * is enabled in tg3_get_eeprom_hw_cfg().
15971                  */
15972                 tg3_flag_set(tp, TSO_CAPABLE);
15973         } else {
15974                 tg3_flag_clear(tp, TSO_CAPABLE);
15975                 tg3_flag_clear(tp, TSO_BUG);
15976                 tp->fw_needed = NULL;
15977         }
15978
15979         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15980                 tp->fw_needed = FIRMWARE_TG3;
15981
15982         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15983                 tp->fw_needed = FIRMWARE_TG357766;
15984
15985         tp->irq_max = 1;
15986
15987         if (tg3_flag(tp, 5750_PLUS)) {
15988                 tg3_flag_set(tp, SUPPORT_MSI);
15989                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15990                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15991                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15992                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15993                      tp->pdev_peer == tp->pdev))
15994                         tg3_flag_clear(tp, SUPPORT_MSI);
15995
15996                 if (tg3_flag(tp, 5755_PLUS) ||
15997                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15998                         tg3_flag_set(tp, 1SHOT_MSI);
15999                 }
16000
16001                 if (tg3_flag(tp, 57765_PLUS)) {
16002                         tg3_flag_set(tp, SUPPORT_MSIX);
16003                         tp->irq_max = TG3_IRQ_MAX_VECS;
16004                 }
16005         }
16006
16007         tp->txq_max = 1;
16008         tp->rxq_max = 1;
16009         if (tp->irq_max > 1) {
16010                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16011                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16012
16013                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16014                     tg3_asic_rev(tp) == ASIC_REV_5720)
16015                         tp->txq_max = tp->irq_max - 1;
16016         }
16017
16018         if (tg3_flag(tp, 5755_PLUS) ||
16019             tg3_asic_rev(tp) == ASIC_REV_5906)
16020                 tg3_flag_set(tp, SHORT_DMA_BUG);
16021
16022         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16023                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16024
16025         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16026             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16027             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16028             tg3_asic_rev(tp) == ASIC_REV_5762)
16029                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16030
16031         if (tg3_flag(tp, 57765_PLUS) &&
16032             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16033                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16034
16035         if (!tg3_flag(tp, 5705_PLUS) ||
16036             tg3_flag(tp, 5780_CLASS) ||
16037             tg3_flag(tp, USE_JUMBO_BDFLAG))
16038                 tg3_flag_set(tp, JUMBO_CAPABLE);
16039
16040         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16041                               &pci_state_reg);
16042
16043         if (pci_is_pcie(tp->pdev)) {
16044                 u16 lnkctl;
16045
16046                 tg3_flag_set(tp, PCI_EXPRESS);
16047
16048                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16049                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16050                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16051                                 tg3_flag_clear(tp, HW_TSO_2);
16052                                 tg3_flag_clear(tp, TSO_CAPABLE);
16053                         }
16054                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16055                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16056                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16057                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16058                                 tg3_flag_set(tp, CLKREQ_BUG);
16059                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16060                         tg3_flag_set(tp, L1PLLPD_EN);
16061                 }
16062         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16063                 /* BCM5785 devices are effectively PCIe devices, and should
16064                  * follow PCIe codepaths, but do not have a PCIe capabilities
16065                  * section.
16066                  */
16067                 tg3_flag_set(tp, PCI_EXPRESS);
16068         } else if (!tg3_flag(tp, 5705_PLUS) ||
16069                    tg3_flag(tp, 5780_CLASS)) {
16070                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16071                 if (!tp->pcix_cap) {
16072                         dev_err(&tp->pdev->dev,
16073                                 "Cannot find PCI-X capability, aborting\n");
16074                         return -EIO;
16075                 }
16076
16077                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16078                         tg3_flag_set(tp, PCIX_MODE);
16079         }
16080
16081         /* If we have an AMD 762 or VIA K8T800 chipset, write
16082          * reordering to the mailbox registers done by the host
16083          * controller can cause major troubles.  We read back from
16084          * every mailbox register write to force the writes to be
16085          * posted to the chip in order.
16086          */
16087         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16088             !tg3_flag(tp, PCI_EXPRESS))
16089                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16090
16091         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16092                              &tp->pci_cacheline_sz);
16093         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16094                              &tp->pci_lat_timer);
16095         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16096             tp->pci_lat_timer < 64) {
16097                 tp->pci_lat_timer = 64;
16098                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16099                                       tp->pci_lat_timer);
16100         }
16101
16102         /* Important! -- It is critical that the PCI-X hw workaround
16103          * situation is decided before the first MMIO register access.
16104          */
16105         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16106                 /* 5700 BX chips need to have their TX producer index
16107                  * mailboxes written twice to workaround a bug.
16108                  */
16109                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16110
16111                 /* If we are in PCI-X mode, enable register write workaround.
16112                  *
16113                  * The workaround is to use indirect register accesses
16114                  * for all chip writes not to mailbox registers.
16115                  */
16116                 if (tg3_flag(tp, PCIX_MODE)) {
16117                         u32 pm_reg;
16118
16119                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16120
16121                         /* The chip can have it's power management PCI config
16122                          * space registers clobbered due to this bug.
16123                          * So explicitly force the chip into D0 here.
16124                          */
16125                         pci_read_config_dword(tp->pdev,
16126                                               tp->pm_cap + PCI_PM_CTRL,
16127                                               &pm_reg);
16128                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16129                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16130                         pci_write_config_dword(tp->pdev,
16131                                                tp->pm_cap + PCI_PM_CTRL,
16132                                                pm_reg);
16133
16134                         /* Also, force SERR#/PERR# in PCI command. */
16135                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16136                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16137                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16138                 }
16139         }
16140
16141         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16142                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16143         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16144                 tg3_flag_set(tp, PCI_32BIT);
16145
16146         /* Chip-specific fixup from Broadcom driver */
16147         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16148             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16149                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16150                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16151         }
16152
16153         /* Default fast path register access methods */
16154         tp->read32 = tg3_read32;
16155         tp->write32 = tg3_write32;
16156         tp->read32_mbox = tg3_read32;
16157         tp->write32_mbox = tg3_write32;
16158         tp->write32_tx_mbox = tg3_write32;
16159         tp->write32_rx_mbox = tg3_write32;
16160
16161         /* Various workaround register access methods */
16162         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16163                 tp->write32 = tg3_write_indirect_reg32;
16164         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16165                  (tg3_flag(tp, PCI_EXPRESS) &&
16166                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16167                 /*
16168                  * Back to back register writes can cause problems on these
16169                  * chips, the workaround is to read back all reg writes
16170                  * except those to mailbox regs.
16171                  *
16172                  * See tg3_write_indirect_reg32().
16173                  */
16174                 tp->write32 = tg3_write_flush_reg32;
16175         }
16176
16177         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16178                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16179                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16180                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16181         }
16182
16183         if (tg3_flag(tp, ICH_WORKAROUND)) {
16184                 tp->read32 = tg3_read_indirect_reg32;
16185                 tp->write32 = tg3_write_indirect_reg32;
16186                 tp->read32_mbox = tg3_read_indirect_mbox;
16187                 tp->write32_mbox = tg3_write_indirect_mbox;
16188                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16189                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16190
16191                 iounmap(tp->regs);
16192                 tp->regs = NULL;
16193
16194                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16195                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16196                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16197         }
16198         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16199                 tp->read32_mbox = tg3_read32_mbox_5906;
16200                 tp->write32_mbox = tg3_write32_mbox_5906;
16201                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16202                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16203         }
16204
16205         if (tp->write32 == tg3_write_indirect_reg32 ||
16206             (tg3_flag(tp, PCIX_MODE) &&
16207              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16208               tg3_asic_rev(tp) == ASIC_REV_5701)))
16209                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16210
16211         /* The memory arbiter has to be enabled in order for SRAM accesses
16212          * to succeed.  Normally on powerup the tg3 chip firmware will make
16213          * sure it is enabled, but other entities such as system netboot
16214          * code might disable it.
16215          */
16216         val = tr32(MEMARB_MODE);
16217         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16218
16219         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16220         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16221             tg3_flag(tp, 5780_CLASS)) {
16222                 if (tg3_flag(tp, PCIX_MODE)) {
16223                         pci_read_config_dword(tp->pdev,
16224                                               tp->pcix_cap + PCI_X_STATUS,
16225                                               &val);
16226                         tp->pci_fn = val & 0x7;
16227                 }
16228         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16229                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16230                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16231                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16232                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16233                         val = tr32(TG3_CPMU_STATUS);
16234
16235                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16236                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16237                 else
16238                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16239                                      TG3_CPMU_STATUS_FSHFT_5719;
16240         }
16241
16242         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16243                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16244                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16245         }
16246
16247         /* Get eeprom hw config before calling tg3_set_power_state().
16248          * In particular, the TG3_FLAG_IS_NIC flag must be
16249          * determined before calling tg3_set_power_state() so that
16250          * we know whether or not to switch out of Vaux power.
16251          * When the flag is set, it means that GPIO1 is used for eeprom
16252          * write protect and also implies that it is a LOM where GPIOs
16253          * are not used to switch power.
16254          */
16255         tg3_get_eeprom_hw_cfg(tp);
16256
16257         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16258                 tg3_flag_clear(tp, TSO_CAPABLE);
16259                 tg3_flag_clear(tp, TSO_BUG);
16260                 tp->fw_needed = NULL;
16261         }
16262
16263         if (tg3_flag(tp, ENABLE_APE)) {
16264                 /* Allow reads and writes to the
16265                  * APE register and memory space.
16266                  */
16267                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16268                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16269                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16270                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16271                                        pci_state_reg);
16272
16273                 tg3_ape_lock_init(tp);
16274         }
16275
16276         /* Set up tp->grc_local_ctrl before calling
16277          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16278          * will bring 5700's external PHY out of reset.
16279          * It is also used as eeprom write protect on LOMs.
16280          */
16281         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16282         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16283             tg3_flag(tp, EEPROM_WRITE_PROT))
16284                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16285                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16286         /* Unused GPIO3 must be driven as output on 5752 because there
16287          * are no pull-up resistors on unused GPIO pins.
16288          */
16289         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16290                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16291
16292         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16293             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16294             tg3_flag(tp, 57765_CLASS))
16295                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16296
16297         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16298             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16299                 /* Turn off the debug UART. */
16300                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16301                 if (tg3_flag(tp, IS_NIC))
16302                         /* Keep VMain power. */
16303                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16304                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16305         }
16306
16307         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16308                 tp->grc_local_ctrl |=
16309                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16310
16311         /* Switch out of Vaux if it is a NIC */
16312         tg3_pwrsrc_switch_to_vmain(tp);
16313
16314         /* Derive initial jumbo mode from MTU assigned in
16315          * ether_setup() via the alloc_etherdev() call
16316          */
16317         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16318                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16319
16320         /* Determine WakeOnLan speed to use. */
16321         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16322             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16323             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16324             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16325                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16326         } else {
16327                 tg3_flag_set(tp, WOL_SPEED_100MB);
16328         }
16329
16330         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16331                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16332
16333         /* A few boards don't want Ethernet@WireSpeed phy feature */
16334         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16335             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16336              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16337              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16338             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16339             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16340                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16341
16342         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16343             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16344                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16345         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16346                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16347
16348         if (tg3_flag(tp, 5705_PLUS) &&
16349             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16350             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16351             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16352             !tg3_flag(tp, 57765_PLUS)) {
16353                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16354                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16355                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16356                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16357                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16358                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16359                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16360                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16361                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16362                 } else
16363                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16364         }
16365
16366         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16367             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16368                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16369                 if (tp->phy_otp == 0)
16370                         tp->phy_otp = TG3_OTP_DEFAULT;
16371         }
16372
16373         if (tg3_flag(tp, CPMU_PRESENT))
16374                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16375         else
16376                 tp->mi_mode = MAC_MI_MODE_BASE;
16377
16378         tp->coalesce_mode = 0;
16379         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16380             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16381                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16382
16383         /* Set these bits to enable statistics workaround. */
16384         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16385             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16386             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16387                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16388                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16389         }
16390
16391         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16392             tg3_asic_rev(tp) == ASIC_REV_57780)
16393                 tg3_flag_set(tp, USE_PHYLIB);
16394
16395         err = tg3_mdio_init(tp);
16396         if (err)
16397                 return err;
16398
16399         /* Initialize data/descriptor byte/word swapping. */
16400         val = tr32(GRC_MODE);
16401         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16402             tg3_asic_rev(tp) == ASIC_REV_5762)
16403                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16404                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16405                         GRC_MODE_B2HRX_ENABLE |
16406                         GRC_MODE_HTX2B_ENABLE |
16407                         GRC_MODE_HOST_STACKUP);
16408         else
16409                 val &= GRC_MODE_HOST_STACKUP;
16410
16411         tw32(GRC_MODE, val | tp->grc_mode);
16412
16413         tg3_switch_clocks(tp);
16414
16415         /* Clear this out for sanity. */
16416         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16417
16418         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16419                               &pci_state_reg);
16420         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16421             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16422                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16423                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16424                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16425                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16426                         void __iomem *sram_base;
16427
16428                         /* Write some dummy words into the SRAM status block
16429                          * area, see if it reads back correctly.  If the return
16430                          * value is bad, force enable the PCIX workaround.
16431                          */
16432                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16433
16434                         writel(0x00000000, sram_base);
16435                         writel(0x00000000, sram_base + 4);
16436                         writel(0xffffffff, sram_base + 4);
16437                         if (readl(sram_base) != 0x00000000)
16438                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16439                 }
16440         }
16441
16442         udelay(50);
16443         tg3_nvram_init(tp);
16444
16445         /* If the device has an NVRAM, no need to load patch firmware */
16446         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16447             !tg3_flag(tp, NO_NVRAM))
16448                 tp->fw_needed = NULL;
16449
16450         grc_misc_cfg = tr32(GRC_MISC_CFG);
16451         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16452
16453         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16454             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16455              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16456                 tg3_flag_set(tp, IS_5788);
16457
16458         if (!tg3_flag(tp, IS_5788) &&
16459             tg3_asic_rev(tp) != ASIC_REV_5700)
16460                 tg3_flag_set(tp, TAGGED_STATUS);
16461         if (tg3_flag(tp, TAGGED_STATUS)) {
16462                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16463                                       HOSTCC_MODE_CLRTICK_TXBD);
16464
16465                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16466                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16467                                        tp->misc_host_ctrl);
16468         }
16469
16470         /* Preserve the APE MAC_MODE bits */
16471         if (tg3_flag(tp, ENABLE_APE))
16472                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16473         else
16474                 tp->mac_mode = 0;
16475
16476         if (tg3_10_100_only_device(tp, ent))
16477                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16478
16479         err = tg3_phy_probe(tp);
16480         if (err) {
16481                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16482                 /* ... but do not return immediately ... */
16483                 tg3_mdio_fini(tp);
16484         }
16485
16486         tg3_read_vpd(tp);
16487         tg3_read_fw_ver(tp);
16488
16489         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16490                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16491         } else {
16492                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16493                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16494                 else
16495                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16496         }
16497
16498         /* 5700 {AX,BX} chips have a broken status block link
16499          * change bit implementation, so we must use the
16500          * status register in those cases.
16501          */
16502         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16503                 tg3_flag_set(tp, USE_LINKCHG_REG);
16504         else
16505                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16506
16507         /* The led_ctrl is set during tg3_phy_probe, here we might
16508          * have to force the link status polling mechanism based
16509          * upon subsystem IDs.
16510          */
16511         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16512             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16513             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16514                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16515                 tg3_flag_set(tp, USE_LINKCHG_REG);
16516         }
16517
16518         /* For all SERDES we poll the MAC status register. */
16519         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16520                 tg3_flag_set(tp, POLL_SERDES);
16521         else
16522                 tg3_flag_clear(tp, POLL_SERDES);
16523
16524         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16525         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16526         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16527             tg3_flag(tp, PCIX_MODE)) {
16528                 tp->rx_offset = NET_SKB_PAD;
16529 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16530                 tp->rx_copy_thresh = ~(u16)0;
16531 #endif
16532         }
16533
16534         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16535         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16536         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16537
16538         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16539
16540         /* Increment the rx prod index on the rx std ring by at most
16541          * 8 for these chips to workaround hw errata.
16542          */
16543         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16544             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16545             tg3_asic_rev(tp) == ASIC_REV_5755)
16546                 tp->rx_std_max_post = 8;
16547
16548         if (tg3_flag(tp, ASPM_WORKAROUND))
16549                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16550                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16551
16552         return err;
16553 }
16554
16555 #ifdef CONFIG_SPARC
16556 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16557 {
16558         struct net_device *dev = tp->dev;
16559         struct pci_dev *pdev = tp->pdev;
16560         struct device_node *dp = pci_device_to_OF_node(pdev);
16561         const unsigned char *addr;
16562         int len;
16563
16564         addr = of_get_property(dp, "local-mac-address", &len);
16565         if (addr && len == 6) {
16566                 memcpy(dev->dev_addr, addr, 6);
16567                 return 0;
16568         }
16569         return -ENODEV;
16570 }
16571
16572 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16573 {
16574         struct net_device *dev = tp->dev;
16575
16576         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16577         return 0;
16578 }
16579 #endif
16580
/* Determine the device's permanent MAC address.  Sources are tried in
 * decreasing order of preference:
 *   1. OpenFirmware device-tree property (sparc only)
 *   2. the SSB GigE core (tg3 cores embedded in Broadcom SSB SoCs)
 *   3. the bootcode MAC address mailbox in NIC SRAM
 *   4. NVRAM, at a chip/function-dependent offset
 *   5. the live MAC_ADDR_0 registers
 *   6. the sparc IDPROM as a final fallback
 *
 * Returns 0 on success with dev->dev_addr filled in, -EINVAL if no
 * valid ethernet address could be found anywhere.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Select the NVRAM offset holding this port/function's MAC
	 * address; 0x7c is the default location.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Per-function address slots on 5717+ multi-function parts */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b (ASCII "HK") in the upper 16 bits marks a valid mailbox */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi carries bytes 0-1 in its low half; lo carries
			 * bytes 2-5 (big-endian reads, so memory order).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16662
16663 #define BOUNDARY_SINGLE_CACHELINE       1
16664 #define BOUNDARY_MULTI_CACHELINE        2
16665
/* Compute DMA read/write boundary bits for the DMA_RWCTRL register
 * based on the host's PCI cache line size and the bus type (PCI,
 * PCI-X, PCI Express).  @val is the current DMA_RWCTRL value; the
 * chosen boundary bits are merged in and the result returned.  The
 * boundary bits only exist on 5700/5701 and PCIe-class chips, so for
 * everything else @val is returned untouched.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; convert to
	 * bytes.  An unprogrammed (zero) register is treated as the
	 * 1024-byte maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture burst goal; RISC hosts generally dislike
	 * bursts that cross cache-line boundaries (see comment below).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ only has a single disable-cache-alignment knob */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe exposes only write-side boundary control */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: for a single-cacheline goal, match the
		 * boundary to the cache line size; otherwise fall
		 * through until the 256-byte (or larger) boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16806
/* Run one test DMA transfer of @size bytes between the host buffer
 * (@buf / @buf_dma) and NIC SRAM using the chip's internal DMA
 * engines.  @to_device selects read-DMA (host -> NIC) vs write-DMA
 * (NIC -> host).  A single internal buffer descriptor is written into
 * the SRAM descriptor pool via the PCI memory window and enqueued on
 * the appropriate FTQ; completion is then polled for up to ~4ms
 * (40 x 100us).
 *
 * Returns 0 when the descriptor shows up in the completion FIFO,
 * -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status before starting */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Host buffer on one side, NIC SRAM offset 0x2100 on the other
	 * (the disabled verification code in tg3_test_dma reads back
	 * from this same 0x2100 offset).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window config registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the relevant completion FIFO for our descriptor */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16887
16888 #define TEST_BUFFER_SIZE        0x2000
16889
16890 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16891         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16892         { },
16893 };
16894
/* Probe for DMA quirks and program tp->dma_rwctrl accordingly.
 *
 * First builds a baseline DMA_RWCTRL value (PCI command codes plus the
 * boundary bits from tg3_calc_dma_bndry()), then applies per-bus and
 * per-ASIC watermark tweaks.  On 5700/5701 it additionally runs a
 * loopback DMA test through tg3_do_test_dma() with the maximum write
 * burst size to expose the known write-DMA bug, falling back to a
 * 16-byte write boundary if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if the DMA test fails even with the
 * workaround boundary.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ needs none of the legacy watermark tuning below */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI watermarks */
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: chip-specific watermarks and workarounds */
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* 5703/5704: clear the low nibble of the watermark field */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback DMA test below */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern -> DMA to chip -> DMA back -> verify.
	 * On corruption, retry once with the 16-byte write boundary;
	 * a second corruption is a hard failure.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ramp pattern */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17086
/* Initialize buffer-manager watermark defaults for this chip.
 *
 * Three tiers, tested most-specific first (57765+ parts also carry the
 * 5705_PLUS flag, so the order of the branches matters): 57765+, other
 * 5705+, and the original 5700-class parts.  Each tier sets both the
 * standard and the jumbo-frame watermark set; the 5906 additionally
 * overrides the MAC RX low-water and high-water values within the
 * 5705+ tier.
 */
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has less on-chip buffer memory; use smaller marks */
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are common to all chips */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
17142
17143 static char *tg3_phy_string(struct tg3 *tp)
17144 {
17145         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17146         case TG3_PHY_ID_BCM5400:        return "5400";
17147         case TG3_PHY_ID_BCM5401:        return "5401";
17148         case TG3_PHY_ID_BCM5411:        return "5411";
17149         case TG3_PHY_ID_BCM5701:        return "5701";
17150         case TG3_PHY_ID_BCM5703:        return "5703";
17151         case TG3_PHY_ID_BCM5704:        return "5704";
17152         case TG3_PHY_ID_BCM5705:        return "5705";
17153         case TG3_PHY_ID_BCM5750:        return "5750";
17154         case TG3_PHY_ID_BCM5752:        return "5752";
17155         case TG3_PHY_ID_BCM5714:        return "5714";
17156         case TG3_PHY_ID_BCM5780:        return "5780";
17157         case TG3_PHY_ID_BCM5755:        return "5755";
17158         case TG3_PHY_ID_BCM5787:        return "5787";
17159         case TG3_PHY_ID_BCM5784:        return "5784";
17160         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17161         case TG3_PHY_ID_BCM5906:        return "5906";
17162         case TG3_PHY_ID_BCM5761:        return "5761";
17163         case TG3_PHY_ID_BCM5718C:       return "5718C";
17164         case TG3_PHY_ID_BCM5718S:       return "5718S";
17165         case TG3_PHY_ID_BCM57765:       return "57765";
17166         case TG3_PHY_ID_BCM5719C:       return "5719C";
17167         case TG3_PHY_ID_BCM5720C:       return "5720C";
17168         case TG3_PHY_ID_BCM5762:        return "5762C";
17169         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17170         case 0:                 return "serdes";
17171         default:                return "unknown";
17172         }
17173 }
17174
17175 static char *tg3_bus_string(struct tg3 *tp, char *str)
17176 {
17177         if (tg3_flag(tp, PCI_EXPRESS)) {
17178                 strcpy(str, "PCI Express");
17179                 return str;
17180         } else if (tg3_flag(tp, PCIX_MODE)) {
17181                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17182
17183                 strcpy(str, "PCIX:");
17184
17185                 if ((clock_ctrl == 7) ||
17186                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17187                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17188                         strcat(str, "133MHz");
17189                 else if (clock_ctrl == 0)
17190                         strcat(str, "33MHz");
17191                 else if (clock_ctrl == 2)
17192                         strcat(str, "50MHz");
17193                 else if (clock_ctrl == 4)
17194                         strcat(str, "66MHz");
17195                 else if (clock_ctrl == 6)
17196                         strcat(str, "100MHz");
17197         } else {
17198                 strcpy(str, "PCI:");
17199                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17200                         strcat(str, "66MHz");
17201                 else
17202                         strcat(str, "33MHz");
17203         }
17204         if (tg3_flag(tp, PCI_32BIT))
17205                 strcat(str, ":32-bit");
17206         else
17207                 strcat(str, ":64-bit");
17208         return str;
17209 }
17210
17211 static void tg3_init_coal(struct tg3 *tp)
17212 {
17213         struct ethtool_coalesce *ec = &tp->coal;
17214
17215         memset(ec, 0, sizeof(*ec));
17216         ec->cmd = ETHTOOL_GCOALESCE;
17217         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17218         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17219         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17220         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17221         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17222         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17223         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17224         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17225         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17226
17227         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17228                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17229                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17230                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17231                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17232                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17233         }
17234
17235         if (tg3_flag(tp, 5705_PLUS)) {
17236                 ec->rx_coalesce_usecs_irq = 0;
17237                 ec->tx_coalesce_usecs_irq = 0;
17238                 ec->stats_block_coalesce_usecs = 0;
17239         }
17240 }
17241
/* tg3_init_one() - PCI probe routine for one Tigon3 device.
 *
 * Enables the PCI function, maps BAR 0 (and BAR 2 for APE-equipped
 * parts), reads chip invariants, configures DMA masks and offload
 * features, lays out the per-vector mailboxes, and registers the
 * net_device.  Returns 0 on success or a negative errno; on failure
 * everything acquired so far is released via the err_out_* labels,
 * in reverse order of acquisition.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	/* Print the driver version banner once, on first probe */
	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pdev->pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Interrupts stay masked until tg3_open() */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* GigE cores embedded in an SSB bus (e.g. SoCs) need several
	 * behavioral quirks flagged up front.
	 */
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the main register block (BAR 0) */
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These device IDs carry an APE (management processor) whose
	 * register block lives in BAR 2.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Detect chip revision, capabilities and workaround flags */
	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Lay out the per-vector mailbox registers.  Interrupt
	 * mailboxes 0-4 are 8 bytes apart; later ones are 4 apart.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	/* These ASICs have hardware timestamping (PTP) support */
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space so AER slot_reset can restore it */
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
17621
17622 static void tg3_remove_one(struct pci_dev *pdev)
17623 {
17624         struct net_device *dev = pci_get_drvdata(pdev);
17625
17626         if (dev) {
17627                 struct tg3 *tp = netdev_priv(dev);
17628
17629                 release_firmware(tp->fw);
17630
17631                 tg3_reset_task_cancel(tp);
17632
17633                 if (tg3_flag(tp, USE_PHYLIB)) {
17634                         tg3_phy_fini(tp);
17635                         tg3_mdio_fini(tp);
17636                 }
17637
17638                 unregister_netdev(dev);
17639                 if (tp->aperegs) {
17640                         iounmap(tp->aperegs);
17641                         tp->aperegs = NULL;
17642                 }
17643                 if (tp->regs) {
17644                         iounmap(tp->regs);
17645                         tp->regs = NULL;
17646                 }
17647                 free_netdev(dev);
17648                 pci_release_regions(pdev);
17649                 pci_disable_device(pdev);
17650                 pci_set_drvdata(pdev, NULL);
17651         }
17652 }
17653
17654 #ifdef CONFIG_PM_SLEEP
17655 static int tg3_suspend(struct device *device)
17656 {
17657         struct pci_dev *pdev = to_pci_dev(device);
17658         struct net_device *dev = pci_get_drvdata(pdev);
17659         struct tg3 *tp = netdev_priv(dev);
17660         int err;
17661
17662         if (!netif_running(dev))
17663                 return 0;
17664
17665         tg3_reset_task_cancel(tp);
17666         tg3_phy_stop(tp);
17667         tg3_netif_stop(tp);
17668
17669         tg3_timer_stop(tp);
17670
17671         tg3_full_lock(tp, 1);
17672         tg3_disable_ints(tp);
17673         tg3_full_unlock(tp);
17674
17675         netif_device_detach(dev);
17676
17677         tg3_full_lock(tp, 0);
17678         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17679         tg3_flag_clear(tp, INIT_COMPLETE);
17680         tg3_full_unlock(tp);
17681
17682         err = tg3_power_down_prepare(tp);
17683         if (err) {
17684                 int err2;
17685
17686                 tg3_full_lock(tp, 0);
17687
17688                 tg3_flag_set(tp, INIT_COMPLETE);
17689                 err2 = tg3_restart_hw(tp, true);
17690                 if (err2)
17691                         goto out;
17692
17693                 tg3_timer_start(tp);
17694
17695                 netif_device_attach(dev);
17696                 tg3_netif_start(tp);
17697
17698 out:
17699                 tg3_full_unlock(tp);
17700
17701                 if (!err2)
17702                         tg3_phy_start(tp);
17703         }
17704
17705         return err;
17706 }
17707
17708 static int tg3_resume(struct device *device)
17709 {
17710         struct pci_dev *pdev = to_pci_dev(device);
17711         struct net_device *dev = pci_get_drvdata(pdev);
17712         struct tg3 *tp = netdev_priv(dev);
17713         int err;
17714
17715         if (!netif_running(dev))
17716                 return 0;
17717
17718         netif_device_attach(dev);
17719
17720         tg3_full_lock(tp, 0);
17721
17722         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17723
17724         tg3_flag_set(tp, INIT_COMPLETE);
17725         err = tg3_restart_hw(tp,
17726                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17727         if (err)
17728                 goto out;
17729
17730         tg3_timer_start(tp);
17731
17732         tg3_netif_start(tp);
17733
17734 out:
17735         tg3_full_unlock(tp);
17736
17737         if (!err)
17738                 tg3_phy_start(tp);
17739
17740         return err;
17741 }
17742 #endif /* CONFIG_PM_SLEEP */
17743
/* System sleep callbacks; compiled out when !CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17745
17746 /**
17747  * tg3_io_error_detected - called when PCI error is detected
17748  * @pdev: Pointer to PCI device
17749  * @state: The current pci connection state
17750  *
17751  * This function is called after a PCI bus error affecting
17752  * this device has been detected.
17753  */
17754 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17755                                               pci_channel_state_t state)
17756 {
17757         struct net_device *netdev = pci_get_drvdata(pdev);
17758         struct tg3 *tp = netdev_priv(netdev);
17759         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17760
17761         netdev_info(netdev, "PCI I/O error detected\n");
17762
17763         rtnl_lock();
17764
17765         if (!netif_running(netdev))
17766                 goto done;
17767
17768         tg3_phy_stop(tp);
17769
17770         tg3_netif_stop(tp);
17771
17772         tg3_timer_stop(tp);
17773
17774         /* Want to make sure that the reset task doesn't run */
17775         tg3_reset_task_cancel(tp);
17776
17777         netif_device_detach(netdev);
17778
17779         /* Clean up software state, even if MMIO is blocked */
17780         tg3_full_lock(tp, 0);
17781         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17782         tg3_full_unlock(tp);
17783
17784 done:
17785         if (state == pci_channel_io_perm_failure) {
17786                 tg3_napi_enable(tp);
17787                 dev_close(netdev);
17788                 err = PCI_ERS_RESULT_DISCONNECT;
17789         } else {
17790                 pci_disable_device(pdev);
17791         }
17792
17793         rtnl_unlock();
17794
17795         return err;
17796 }
17797
17798 /**
17799  * tg3_io_slot_reset - called after the pci bus has been reset.
17800  * @pdev: Pointer to PCI device
17801  *
17802  * Restart the card from scratch, as if from a cold-boot.
17803  * At this point, the card has exprienced a hard reset,
17804  * followed by fixups by BIOS, and has its config space
17805  * set up identically to what it was at cold boot.
17806  */
17807 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17808 {
17809         struct net_device *netdev = pci_get_drvdata(pdev);
17810         struct tg3 *tp = netdev_priv(netdev);
17811         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17812         int err;
17813
17814         rtnl_lock();
17815
17816         if (pci_enable_device(pdev)) {
17817                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17818                 goto done;
17819         }
17820
17821         pci_set_master(pdev);
17822         pci_restore_state(pdev);
17823         pci_save_state(pdev);
17824
17825         if (!netif_running(netdev)) {
17826                 rc = PCI_ERS_RESULT_RECOVERED;
17827                 goto done;
17828         }
17829
17830         err = tg3_power_up(tp);
17831         if (err)
17832                 goto done;
17833
17834         rc = PCI_ERS_RESULT_RECOVERED;
17835
17836 done:
17837         if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
17838                 tg3_napi_enable(tp);
17839                 dev_close(netdev);
17840         }
17841         rtnl_unlock();
17842
17843         return rc;
17844 }
17845
17846 /**
17847  * tg3_io_resume - called when traffic can start flowing again.
17848  * @pdev: Pointer to PCI device
17849  *
17850  * This callback is called when the error recovery driver tells
17851  * us that its OK to resume normal operation.
17852  */
17853 static void tg3_io_resume(struct pci_dev *pdev)
17854 {
17855         struct net_device *netdev = pci_get_drvdata(pdev);
17856         struct tg3 *tp = netdev_priv(netdev);
17857         int err;
17858
17859         rtnl_lock();
17860
17861         if (!netif_running(netdev))
17862                 goto done;
17863
17864         tg3_full_lock(tp, 0);
17865         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17866         tg3_flag_set(tp, INIT_COMPLETE);
17867         err = tg3_restart_hw(tp, true);
17868         if (err) {
17869                 tg3_full_unlock(tp);
17870                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17871                 goto done;
17872         }
17873
17874         netif_device_attach(netdev);
17875
17876         tg3_timer_start(tp);
17877
17878         tg3_netif_start(tp);
17879
17880         tg3_full_unlock(tp);
17881
17882         tg3_phy_start(tp);
17883
17884 done:
17885         rtnl_unlock();
17886 }
17887
/* PCI error-recovery (AER) hooks */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};

/* PCI driver glue: binds tg3_pci_tbl IDs to the probe/remove,
 * error-recovery and power-management callbacks above.
 */
static struct pci_driver tg3_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = tg3_pci_tbl,
	.probe          = tg3_init_one,
	.remove         = tg3_remove_one,
	.err_handler    = &tg3_err_handler,
	.driver.pm      = &tg3_pm_ops,
};

/* Generates module init/exit that register/unregister the driver */
module_pci_driver(tg3_driver);