/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

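/* For illustration: a test such as
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *
 * expands to _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the flag bitmap, so the token pasted after
 * TG3_FLAG_ must name a member of enum TG3_FLAGS.
 */
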
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
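
/* For example, with TG3_TX_RING_SIZE == 512 the mask is 0x1ff, so
 * NEXT_TX(511) == (512 & 0x1ff) == 0: the producer index wraps back to
 * the start of the ring, the '% foo' -> '& (foo - 1)' rewrite described
 * in the comment above.
 */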

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
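
/* Every register access below goes through these macros, which dispatch
 * via the tp->write32/tp->read32 method pointers so one code path can
 * serve direct MMIO, indirect config-space access and the assorted flush
 * workarounds.  A typical flushed write such as
 *
 *      tw32_f(MAC_MI_COM, frame_val);
 *
 * lands in _tw32_flush() above, which either uses a non-posted write
 * method or posts the write and reads the register back to flush it.
 */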

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
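                /* fall through: on non-5761 parts the GPIO lock is
                 * requested just like the GRC and MEM locks below.
                 */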
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
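
/* Callers bracket APE-arbitrated accesses with this lock/unlock pair, e.g.
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return;
 *      ...access state shared with the APE firmware...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * as tg3_ape_send_event() below does.  A non-zero return means the grant
 * register never reflected our request bit within the 1 ms polling window.
 */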

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
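                /* fall through: non-5761 GPIO grants are released the
                 * same way as the GRC and MEM grants below.
                 */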
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
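
/* The MI_COM frame assembled in tg3_readphy() packs the PHY address,
 * register number and opcode into one 32-bit register write.  Typical
 * use, as in tg3_ump_link_report() below:
 *
 *      u32 reg;
 *
 *      if (!tg3_readphy(tp, MII_BMSR, &reg))
 *              ...reg now holds the PHY's basic-mode status word...
 *
 * A zero return means success; -EBUSY means MI_COM_BUSY never cleared
 * within PHY_BUSY_LOOPS polls.
 */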

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
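
/* Worst case the loop above polls BMCR 5000 times with a 10 usec pause
 * between reads, so a PHY that never clears BMCR_RESET costs roughly
 * 50 ms before tg3_bmcr_reset() gives up with -EBUSY.
 */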

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
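
/* The arithmetic above quantizes the remaining wait into 8 usec polls:
 * with TG3_FW_EVENT_TIMEOUT_USEC == 2500, delay_cnt is at most
 * (2500 >> 3) + 1 == 313 iterations, just over the 2.5 ms firmware event
 * timeout, and the loop exits early once the RX CPU acks the previous
 * event by clearing GRC_RX_CPU_DRIVER_EVENT.
 */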

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
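        /* tg3_write_sig_pre_reset() deposits NIC_SRAM_FIRMWARE_MBOX_MAGIC1
         * in this mailbox before a reset; the bootcode signals completion
         * by replacing it with the one's complement of that value.
         */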
1616         /* Wait for firmware initialization to complete. */
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more
1637                  * time to do some important work.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
1644
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
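/* Translate the driver's FLOW_CTRL_TX/FLOW_CTRL_RX request into the
 * 1000BASE-X autoneg pause advertisement bits (IEEE 802.3 clause 37):
 *
 *   TX and RX -> PAUSE              (symmetric pause)
 *   TX only   -> PSE_ASYM           (can send pause frames only)
 *   RX only   -> PAUSE | PSE_ASYM   (can honor pause frames only)
 */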
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
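/* Resolve the pause mode negotiated on a 1000BASE-X link from the
 * local and link-partner advertisements, following the priority
 * resolution of IEEE 802.3 annex 28B: symmetric pause applies when
 * both ends advertise it; otherwise the asymmetric pause bits can
 * enable at most one direction.
 */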
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
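/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init().  Runs whenever the attached PHY's state changes and
 * re-derives the MAC port mode, duplex, and flow control settings
 * from the PHY state, reporting link transitions as needed.
 */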
1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746         u8 oldflowctrl, linkmesg = 0;
1747         u32 mac_mode, lcl_adv, rmt_adv;
1748         struct tg3 *tp = netdev_priv(dev);
1749         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750
1751         spin_lock_bh(&tp->lock);
1752
1753         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754                                     MAC_MODE_HALF_DUPLEX);
1755
1756         oldflowctrl = tp->link_config.active_flowctrl;
1757
1758         if (phydev->link) {
1759                 lcl_adv = 0;
1760                 rmt_adv = 0;
1761
1762                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1764                 else if (phydev->speed == SPEED_1000 ||
1765                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767                 else
1768                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1769
1770                 if (phydev->duplex == DUPLEX_HALF)
1771                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1772                 else {
1773                         lcl_adv = mii_advertise_flowctrl(
1774                                   tp->link_config.flowctrl);
1775
1776                         if (phydev->pause)
1777                                 rmt_adv = LPA_PAUSE_CAP;
1778                         if (phydev->asym_pause)
1779                                 rmt_adv |= LPA_PAUSE_ASYM;
1780                 }
1781
1782                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783         } else
1784                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785
1786         if (mac_mode != tp->mac_mode) {
1787                 tp->mac_mode = mac_mode;
1788                 tw32_f(MAC_MODE, tp->mac_mode);
1789                 udelay(40);
1790         }
1791
1792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793                 if (phydev->speed == SPEED_10)
1794                         tw32(MAC_MI_STAT,
1795                              MAC_MI_STAT_10MBPS_MODE |
1796                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797                 else
1798                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799         }
1800
1801         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802                 tw32(MAC_TX_LENGTHS,
1803                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804                       (6 << TX_LENGTHS_IPG_SHIFT) |
1805                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806         else
1807                 tw32(MAC_TX_LENGTHS,
1808                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809                       (6 << TX_LENGTHS_IPG_SHIFT) |
1810                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811
1812         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814             phydev->speed != tp->link_config.active_speed ||
1815             phydev->duplex != tp->link_config.active_duplex ||
1816             oldflowctrl != tp->link_config.active_flowctrl)
1817                 linkmesg = 1;
1818
1819         tp->link_config.active_speed = phydev->speed;
1820         tp->link_config.active_duplex = phydev->duplex;
1821
1822         spin_unlock_bh(&tp->lock);
1823
1824         if (linkmesg)
1825                 tg3_link_report(tp);
1826 }
1827
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830         struct phy_device *phydev;
1831
1832         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833                 return 0;
1834
1835         /* Bring the PHY back to a known state. */
1836         tg3_bmcr_reset(tp);
1837
1838         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839
1840         /* Attach the MAC to the PHY. */
1841         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842                              phydev->dev_flags, phydev->interface);
1843         if (IS_ERR(phydev)) {
1844                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845                 return PTR_ERR(phydev);
1846         }
1847
1848         /* Mask with MAC supported features. */
1849         switch (phydev->interface) {
1850         case PHY_INTERFACE_MODE_GMII:
1851         case PHY_INTERFACE_MODE_RGMII:
1852                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853                         phydev->supported &= (PHY_GBIT_FEATURES |
1854                                               SUPPORTED_Pause |
1855                                               SUPPORTED_Asym_Pause);
1856                         break;
1857                 }
1858                 /* fallthru */
1859         case PHY_INTERFACE_MODE_MII:
1860                 phydev->supported &= (PHY_BASIC_FEATURES |
1861                                       SUPPORTED_Pause |
1862                                       SUPPORTED_Asym_Pause);
1863                 break;
1864         default:
1865                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866                 return -EINVAL;
1867         }
1868
1869         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870
1871         phydev->advertising = phydev->supported;
1872
1873         return 0;
1874 }
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
1963
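/* Enable or disable the PHY's auto power-down (APD) feature through
 * the misc shadow registers: first program the SCR5 low-power
 * controls, then set or clear the APD enable bit with an 84 ms wake
 * timer.
 */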
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001         u32 phy;
2002
2003         if (!tg3_flag(tp, 5705_PLUS) ||
2004             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005                 return;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008                 u32 ephy;
2009
2010                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013                         tg3_writephy(tp, MII_TG3_FET_TEST,
2014                                      ephy | MII_TG3_FET_SHADOW_EN);
2015                         if (!tg3_readphy(tp, reg, &phy)) {
2016                                 if (enable)
2017                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018                                 else
2019                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020                                 tg3_writephy(tp, reg, phy);
2021                         }
2022                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023                 }
2024         } else {
2025                 int ret;
2026
2027                 ret = tg3_phy_auxctl_read(tp,
2028                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029                 if (!ret) {
2030                         if (enable)
2031                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032                         else
2033                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034                         tg3_phy_auxctl_write(tp,
2035                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036                 }
2037         }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
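/* Program per-chip analog calibration values into the PHY DSP taps.
 * The values come from the chip's one-time-programmable (OTP) memory,
 * cached in tp->phy_otp; each field is extracted below and written
 * while auxiliary-control DSP access is enabled.
 */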
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056         u32 otp, phy;
2057
2058         if (!tp->phy_otp)
2059                 return;
2060
2061         otp = tp->phy_otp;
2062
2063         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064                 return;
2065
2066         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069
2070         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073
2074         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077
2078         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080
2081         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083
2084         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087
2088         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093         u32 val;
2094
2095         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096                 return;
2097
2098         tp->setlpicnt = 0;
2099
2100         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101             current_link_up == 1 &&
2102             tp->link_config.active_duplex == DUPLEX_FULL &&
2103             (tp->link_config.active_speed == SPEED_100 ||
2104              tp->link_config.active_speed == SPEED_1000)) {
2105                 u32 eeectl;
2106
2107                 if (tp->link_config.active_speed == SPEED_1000)
2108                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109                 else
2110                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111
2112                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113
2114                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115                                   TG3_CL45_D7_EEERES_STAT, &val);
2116
2117                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119                         tp->setlpicnt = 2;
2120         }
2121
2122         if (!tp->setlpicnt) {
2123                 if (current_link_up == 1 &&
2124                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127                 }
2128
2129                 val = tr32(TG3_CPMU_EEE_MODE);
2130                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131         }
2132 }
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136         u32 val;
2137
2138         if (tp->link_config.active_speed == SPEED_1000 &&
2139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141              tg3_flag(tp, 57765_CLASS)) &&
2142             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                 val = MII_TG3_DSP_TAP26_ALNOKO |
2144                       MII_TG3_DSP_TAP26_RMRXSTO;
2145                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147         }
2148
2149         val = tr32(TG3_CPMU_EEE_MODE);
2150         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
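/* Write a known test pattern into each of the four DSP channels (the
 * chan * 0x2000 offset selects the channel block), then read it back
 * and compare.  Any mismatch or macro timeout sets *resetp to ask the
 * caller to reset the PHY and retry.
 */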
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173         static const u32 test_pat[4][6] = {
2174         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178         };
2179         int chan;
2180
2181         for (chan = 0; chan < 4; chan++) {
2182                 int i;
2183
2184                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185                              (chan * 0x2000) | 0x0200);
2186                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187
2188                 for (i = 0; i < 6; i++)
2189                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190                                      test_pat[chan][i]);
2191
2192                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193                 if (tg3_wait_macro_done(tp)) {
2194                         *resetp = 1;
2195                         return -EBUSY;
2196                 }
2197
2198                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199                              (chan * 0x2000) | 0x0200);
2200                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201                 if (tg3_wait_macro_done(tp)) {
2202                         *resetp = 1;
2203                         return -EBUSY;
2204                 }
2205
2206                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207                 if (tg3_wait_macro_done(tp)) {
2208                         *resetp = 1;
2209                         return -EBUSY;
2210                 }
2211
2212                 for (i = 0; i < 6; i += 2) {
2213                         u32 low, high;
2214
2215                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217                             tg3_wait_macro_done(tp)) {
2218                                 *resetp = 1;
2219                                 return -EBUSY;
2220                         }
2221                         low &= 0x7fff;
2222                         high &= 0x000f;
2223                         if (low != test_pat[chan][i] ||
2224                             high != test_pat[chan][i+1]) {
2225                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228
2229                                 return -EBUSY;
2230                         }
2231                 }
2232         }
2233
2234         return 0;
2235 }
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259         u32 reg32, phy9_orig;
2260         int retries, do_phy_reset, err;
2261
2262         retries = 10;
2263         do_phy_reset = 1;
2264         do {
2265                 if (do_phy_reset) {
2266                         err = tg3_bmcr_reset(tp);
2267                         if (err)
2268                                 return err;
2269                         do_phy_reset = 0;
2270                 }
2271
2272                 /* Disable transmitter and interrupt.  */
2273                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274                         continue;
2275
2276                 reg32 |= 0x3000;
2277                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279                 /* Set full-duplex, 1000 Mbps.  */
2280                 tg3_writephy(tp, MII_BMCR,
2281                              BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283                 /* Set to master mode.  */
2284                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285                         continue;
2286
2287                 tg3_writephy(tp, MII_CTRL1000,
2288                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291                 if (err)
2292                         return err;
2293
2294                 /* Block the PHY control access.  */
2295                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298                 if (!err)
2299                         break;
2300         } while (--retries);
2301
2302         err = tg3_phy_reset_chanpat(tp);
2303         if (err)
2304                 return err;
2305
2306         tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316                 reg32 &= ~0x3000;
2317                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318         } else if (!err)
2319                 err = -EBUSY;
2320
2321         return err;
2322 }
2323
2324 /* Unconditionally reset the tigon3 PHY and reapply the chip- and
2325  * PHY-specific workarounds that a reset undoes.
2326  */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329         u32 val, cpmuctrl;
2330         int err;
2331
2332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333                 val = tr32(GRC_MISC_CFG);
2334                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335                 udelay(40);
2336         }
2337         err  = tg3_readphy(tp, MII_BMSR, &val);
2338         err |= tg3_readphy(tp, MII_BMSR, &val);
2339         if (err != 0)
2340                 return -EBUSY;
2341
2342         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343                 netif_carrier_off(tp->dev);
2344                 tg3_link_report(tp);
2345         }
2346
2347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350                 err = tg3_phy_reset_5703_4_5(tp);
2351                 if (err)
2352                         return err;
2353                 goto out;
2354         }
2355
2356         cpmuctrl = 0;
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2360                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361                         tw32(TG3_CPMU_CTRL,
2362                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363         }
2364
2365         err = tg3_bmcr_reset(tp);
2366         if (err)
2367                 return err;
2368
2369         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372
2373                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2374         }
2375
2376         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2381                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382                         udelay(40);
2383                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384                 }
2385         }
2386
2387         if (tg3_flag(tp, 5717_PLUS) &&
2388             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389                 return 0;
2390
2391         tg3_phy_apply_otp(tp);
2392
2393         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394                 tg3_phy_toggle_apd(tp, true);
2395         else
2396                 tg3_phy_toggle_apd(tp, false);
2397
2398 out:
2399         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2403                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404         }
2405
2406         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409         }
2410
2411         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2414                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2415                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417                 }
2418         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423                                 tg3_writephy(tp, MII_TG3_TEST1,
2424                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2425                         } else
2426                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427
2428                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429                 }
2430         }
2431
2432         /* Set the extended packet length bit (bit 14) on all chips that
2433          * support jumbo frames. */
2434         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435                 /* Cannot do read-modify-write on 5401 */
2436                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438                 /* Set bit 14 with read-modify-write to preserve other bits */
2439                 err = tg3_phy_auxctl_read(tp,
2440                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441                 if (!err)
2442                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444         }
2445
2446         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2447          * jumbo frame transmission.
2448          */
2449         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453         }
2454
2455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456                 /* adjust output voltage */
2457                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458         }
2459
2460         tg3_phy_toggle_automdix(tp, 1);
2461         tg3_phy_set_wirespeed(tp);
2462         return 0;
2463 }
2464
2465 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2467 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2468                                           TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473          (TG3_GPIO_MSG_DRVR_PRES << 12))
2474
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479          (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
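/* Each PCI function owns one 4-bit nibble in the shared GPIO message
 * word, hence the << 0/4/8/12 replication in the masks above.  Update
 * the calling function's nibble and return the combined status so the
 * caller can see what every function is requesting.
 */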
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531         u32 grc_local_ctrl;
2532
2533         if (!tg3_flag(tp, IS_NIC) ||
2534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536                 return;
2537
2538         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540         tw32_wait_f(GRC_LOCAL_CTRL,
2541                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544         tw32_wait_f(GRC_LOCAL_CTRL,
2545                     grc_local_ctrl,
2546                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548         tw32_wait_f(GRC_LOCAL_CTRL,
2549                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555         if (!tg3_flag(tp, IS_NIC))
2556                 return;
2557
2558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561                             (GRC_LCLCTRL_GPIO_OE0 |
2562                              GRC_LCLCTRL_GPIO_OE1 |
2563                              GRC_LCLCTRL_GPIO_OE2 |
2564                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2565                              GRC_LCLCTRL_GPIO_OUTPUT1),
2566                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2567         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2570                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571                                      GRC_LCLCTRL_GPIO_OE1 |
2572                                      GRC_LCLCTRL_GPIO_OE2 |
2573                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2574                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2575                                      tp->grc_local_ctrl;
2576                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2578
2579                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2582
2583                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2586         } else {
2587                 u32 no_gpio2;
2588                 u32 grc_local_ctrl = 0;
2589
2590                 /* Workaround to prevent drawing too much current. */
2591                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594                                     grc_local_ctrl,
2595                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2596                 }
2597
2598                 /* On 5753 and variants, GPIO2 cannot be used. */
2599                 no_gpio2 = tp->nic_sram_data_cfg &
2600                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2601
2602                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603                                   GRC_LCLCTRL_GPIO_OE1 |
2604                                   GRC_LCLCTRL_GPIO_OE2 |
2605                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2606                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2607                 if (no_gpio2) {
2608                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2610                 }
2611                 tw32_wait_f(GRC_LOCAL_CTRL,
2612                             tp->grc_local_ctrl | grc_local_ctrl,
2613                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2614
2615                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616
2617                 tw32_wait_f(GRC_LOCAL_CTRL,
2618                             tp->grc_local_ctrl | grc_local_ctrl,
2619                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2620
2621                 if (!no_gpio2) {
2622                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623                         tw32_wait_f(GRC_LOCAL_CTRL,
2624                                     tp->grc_local_ctrl | grc_local_ctrl,
2625                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2626                 }
2627         }
2628 }
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657         bool need_vaux = false;
2658
2659         /* The GPIOs are repurposed on 57765-class devices; leave them alone. */
2660         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661                 return;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666                 tg3_frob_aux_power_5717(tp, include_wol ?
2667                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668                 return;
2669         }
2670
2671         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672                 struct net_device *dev_peer;
2673
2674                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676                 /* remove_one() may have been run on the peer. */
2677                 if (dev_peer) {
2678                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2681                                 return;
2682
2683                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684                             tg3_flag(tp_peer, ENABLE_ASF))
2685                                 need_vaux = true;
2686                 }
2687         }
2688
2689         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690             tg3_flag(tp, ENABLE_ASF))
2691                 need_vaux = true;
2692
2693         if (need_vaux)
2694                 tg3_pwrsrc_switch_to_vaux(tp);
2695         else
2696                 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714
2715 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2716 {
2717         u32 val;
2718
2719         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2720                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2721                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2722                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2723
2724                         sg_dig_ctrl |=
2725                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2726                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2727                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2728                 }
2729                 return;
2730         }
2731
2732         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2733                 tg3_bmcr_reset(tp);
2734                 val = tr32(GRC_MISC_CFG);
2735                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2736                 udelay(40);
2737                 return;
2738         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2739                 u32 phytest;
2740                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2741                         u32 phy;
2742
2743                         tg3_writephy(tp, MII_ADVERTISE, 0);
2744                         tg3_writephy(tp, MII_BMCR,
2745                                      BMCR_ANENABLE | BMCR_ANRESTART);
2746
2747                         tg3_writephy(tp, MII_TG3_FET_TEST,
2748                                      phytest | MII_TG3_FET_SHADOW_EN);
2749                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2750                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2751                                 tg3_writephy(tp,
2752                                              MII_TG3_FET_SHDW_AUXMODE4,
2753                                              phy);
2754                         }
2755                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2756                 }
2757                 return;
2758         } else if (do_low_power) {
2759                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2761
2762                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2763                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2764                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2765                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2766         }
2767
2768         /* The PHY should not be powered down on some chips because
2769          * of bugs.
2770          */
2771         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2772             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2773             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2774              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2775                 return;
2776
2777         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2778             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2779                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2780                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2781                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2782                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2783         }
2784
2785         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2786 }
2787
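/* NVRAM is shared with other on-chip agents, so access is arbitrated
 * through the software arbitration register: set the request bit and
 * poll for the matching grant bit (up to 8000 polls of 20 usec each,
 * roughly 160 ms).  nvram_lock_cnt makes the lock recursive within
 * the driver; only the outermost unlock drops the grant.
 */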
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791         if (tg3_flag(tp, NVRAM)) {
2792                 int i;
2793
2794                 if (tp->nvram_lock_cnt == 0) {
2795                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796                         for (i = 0; i < 8000; i++) {
2797                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798                                         break;
2799                                 udelay(20);
2800                         }
2801                         if (i == 8000) {
2802                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803                                 return -ENODEV;
2804                         }
2805                 }
2806                 tp->nvram_lock_cnt++;
2807         }
2808         return 0;
2809 }
2810
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814         if (tg3_flag(tp, NVRAM)) {
2815                 if (tp->nvram_lock_cnt > 0)
2816                         tp->nvram_lock_cnt--;
2817                 if (tp->nvram_lock_cnt == 0)
2818                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819         }
2820 }
2821
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826                 u32 nvaccess = tr32(NVRAM_ACCESS);
2827
2828                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829         }
2830 }
2831
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836                 u32 nvaccess = tr32(NVRAM_ACCESS);
2837
2838                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839         }
2840 }
2841
2842 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2843                                         u32 offset, u32 *val)
2844 {
2845         u32 tmp;
2846         int i;
2847
2848         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2849                 return -EINVAL;
2850
2851         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2852                                         EEPROM_ADDR_DEVID_MASK |
2853                                         EEPROM_ADDR_READ);
2854         tw32(GRC_EEPROM_ADDR,
2855              tmp |
2856              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2857              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2858               EEPROM_ADDR_ADDR_MASK) |
2859              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2860
2861         for (i = 0; i < 1000; i++) {
2862                 tmp = tr32(GRC_EEPROM_ADDR);
2863
2864                 if (tmp & EEPROM_ADDR_COMPLETE)
2865                         break;
2866                 msleep(1);
2867         }
2868         if (!(tmp & EEPROM_ADDR_COMPLETE))
2869                 return -EBUSY;
2870
2871         tmp = tr32(GRC_EEPROM_DATA);
2872
2873         /*
2874          * The data will always be in the opposite of the native endian
2875          * format.  Perform a blind byteswap to compensate.
2876          */
2877         *val = swab32(tmp);
2878
2879         return 0;
2880 }
2881
2882 #define NVRAM_CMD_TIMEOUT 10000
2883
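/* At 10 usec per poll, NVRAM_CMD_TIMEOUT bounds each NVRAM command at
 * roughly 100 ms before giving up with -EBUSY.
 */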
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886         int i;
2887
2888         tw32(NVRAM_CMD, nvram_cmd);
2889         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890                 udelay(10);
2891                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892                         udelay(10);
2893                         break;
2894                 }
2895         }
2896
2897         if (i == NVRAM_CMD_TIMEOUT)
2898                 return -EBUSY;
2899
2900         return 0;
2901 }
2902
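/* Atmel AT45DB0x1B DataFlash parts use a non-power-of-two page size
 * (264 bytes), so a linear byte offset must be translated into the
 * page-number/byte-within-page form the chip expects: the page index
 * goes in the bits above ATMEL_AT45DB0X1B_PAGE_POS, the remainder
 * below it.  tg3_nvram_logical_addr() performs the inverse mapping.
 */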
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905         if (tg3_flag(tp, NVRAM) &&
2906             tg3_flag(tp, NVRAM_BUFFERED) &&
2907             tg3_flag(tp, FLASH) &&
2908             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909             (tp->nvram_jedecnum == JEDEC_ATMEL))
2910
2911                 addr = ((addr / tp->nvram_pagesize) <<
2912                         ATMEL_AT45DB0X1B_PAGE_POS) +
2913                        (addr % tp->nvram_pagesize);
2914
2915         return addr;
2916 }
2917
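/* Inverse of tg3_nvram_phys_addr(): map a page-based physical address
 * back to a linear NVRAM offset.
 */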
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920         if (tg3_flag(tp, NVRAM) &&
2921             tg3_flag(tp, NVRAM_BUFFERED) &&
2922             tg3_flag(tp, FLASH) &&
2923             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924             (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
2926                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927                         tp->nvram_pagesize) +
2928                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929
2930         return addr;
2931 }
2932
2933 /* NOTE: Data read in from NVRAM is byteswapped according to
2934  * the byteswapping settings for all other register accesses.
2935  * tg3 devices are BE devices, so on a BE machine, the data
2936  * returned will be exactly as it is seen in NVRAM.  On a LE
2937  * machine, the 32-bit value will be byteswapped.
2938  */
2939 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2940 {
2941         int ret;
2942
2943         if (!tg3_flag(tp, NVRAM))
2944                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2945
2946         offset = tg3_nvram_phys_addr(tp, offset);
2947
2948         if (offset > NVRAM_ADDR_MSK)
2949                 return -EINVAL;
2950
2951         ret = tg3_nvram_lock(tp);
2952         if (ret)
2953                 return ret;
2954
2955         tg3_enable_nvram_access(tp);
2956
2957         tw32(NVRAM_ADDR, offset);
2958         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2959                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2960
2961         if (ret == 0)
2962                 *val = tr32(NVRAM_RDDATA);
2963
2964         tg3_disable_nvram_access(tp);
2965
2966         tg3_nvram_unlock(tp);
2967
2968         return ret;
2969 }
2970
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974         u32 v;
2975         int res = tg3_nvram_read(tp, offset, &v);
2976         if (!res)
2977                 *val = cpu_to_be32(v);
2978         return res;
2979 }
2980
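/* Each on-chip RISC CPU has a 16 KB scratch memory window into which
 * firmware images are downloaded.
 */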
2981 #define RX_CPU_SCRATCH_BASE     0x30000
2982 #define RX_CPU_SCRATCH_SIZE     0x04000
2983 #define TX_CPU_SCRATCH_BASE     0x34000
2984 #define TX_CPU_SCRATCH_SIZE     0x04000
2985
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989         int i;
2990
2991         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992
2993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995
2996                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997                 return 0;
2998         }
2999         if (offset == RX_CPU_BASE) {
3000                 for (i = 0; i < 10000; i++) {
3001                         tw32(offset + CPU_STATE, 0xffffffff);
3002                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3003                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004                                 break;
3005                 }
3006
3007                 tw32(offset + CPU_STATE, 0xffffffff);
3008                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3009                 udelay(10);
3010         } else {
3011                 for (i = 0; i < 10000; i++) {
3012                         tw32(offset + CPU_STATE, 0xffffffff);
3013                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3014                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015                                 break;
3016                 }
3017         }
3018
3019         if (i >= 10000) {
3020                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022                 return -ENODEV;
3023         }
3024
3025         /* Clear firmware's nvram arbitration. */
3026         if (tg3_flag(tp, NVRAM))
3027                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028         return 0;
3029 }
3030
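/* Describes a firmware image: fw_base is the load address taken from
 * the blob header, fw_len is the image length in bytes, and fw_data
 * points at the big-endian image words.
 */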
3031 struct fw_info {
3032         unsigned int fw_base;
3033         unsigned int fw_len;
3034         const __be32 *fw_data;
3035 };
3036
3037 /* tp->lock is held. */
3038 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3039                                  u32 cpu_scratch_base, int cpu_scratch_size,
3040                                  struct fw_info *info)
3041 {
3042         int err, lock_err, i;
3043         void (*write_op)(struct tg3 *, u32, u32);
3044
3045         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3046                 netdev_err(tp->dev,
3047                            "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
3048                            __func__);
3049                 return -EINVAL;
3050         }
3051
3052         if (tg3_flag(tp, 5705_PLUS))
3053                 write_op = tg3_write_mem;
3054         else
3055                 write_op = tg3_write_indirect_reg32;
3056
3057         /* Bootcode may still be loading at this point.
3058          * Acquire the NVRAM lock before halting the CPU.
3059          */
3060         lock_err = tg3_nvram_lock(tp);
3061         err = tg3_halt_cpu(tp, cpu_base);
3062         if (!lock_err)
3063                 tg3_nvram_unlock(tp);
3064         if (err)
3065                 goto out;
3066
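        /* Zero the whole scratch area, then copy the big-endian firmware
         * image in 32-bit words.  The low 16 bits of fw_base give the
         * load offset within the scratch window.
         */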
3067         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3068                 write_op(tp, cpu_scratch_base + i, 0);
3069         tw32(cpu_base + CPU_STATE, 0xffffffff);
3070         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3071         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3072                 write_op(tp, (cpu_scratch_base +
3073                               (info->fw_base & 0xffff) +
3074                               (i * sizeof(u32))),
3075                               be32_to_cpu(info->fw_data[i]));
3076
3077         err = 0;
3078
3079 out:
3080         return err;
3081 }
3082
3083 /* tp->lock is held. */
3084 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3085 {
3086         struct fw_info info;
3087         const __be32 *fw_data;
3088         int err, i;
3089
3090         fw_data = (void *)tp->fw->data;
3091
3092         /* The firmware blob starts with a three-word header: version
3093            numbers, start address, and length.  The length covers the
3094            complete image (end_address_of_bss - start_address_of_text);
3095            the remainder of the blob is loaded contiguously from the
3096            start address. */
3097
3098         info.fw_base = be32_to_cpu(fw_data[1]);
3099         info.fw_len = tp->fw->size - 12;
3100         info.fw_data = &fw_data[3];
3101
3102         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3103                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3104                                     &info);
3105         if (err)
3106                 return err;
3107
3108         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3109                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3110                                     &info);
3111         if (err)
3112                 return err;
3113
3114         /* Now start up only the RX CPU. */
3115         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3116         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3117
3118         for (i = 0; i < 5; i++) {
3119                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3120                         break;
3121                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3122                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3123                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3124                 udelay(1000);
3125         }
3126         if (i >= 5) {
3127                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3128                            "should be %08x\n", __func__,
3129                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3130                 return -ENODEV;
3131         }
3132         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3134
3135         return 0;
3136 }
3137
3138 /* tp->lock is held. */
3139 static int tg3_load_tso_firmware(struct tg3 *tp)
3140 {
3141         struct fw_info info;
3142         const __be32 *fw_data;
3143         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3144         int err, i;
3145
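        /* Chips with hardware TSO do not need the firmware-based TSO
         * engine; there is nothing to load.
         */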
3146         if (tg3_flag(tp, HW_TSO_1) ||
3147             tg3_flag(tp, HW_TSO_2) ||
3148             tg3_flag(tp, HW_TSO_3))
3149                 return 0;
3150
3151         fw_data = (void *)tp->fw->data;
3152
3153         /* The firmware blob starts with a three-word header: version
3154            numbers, start address, and length.  The length covers the
3155            complete image (end_address_of_bss - start_address_of_text);
3156            the remainder of the blob is loaded contiguously from the
3157            start address. */
3158
3159         info.fw_base = be32_to_cpu(fw_data[1]);
3160         cpu_scratch_size = tp->fw_len;
3161         info.fw_len = tp->fw->size - 12;
3162         info.fw_data = &fw_data[3];
3163
3164         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3165                 cpu_base = RX_CPU_BASE;
3166                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3167         } else {
3168                 cpu_base = TX_CPU_BASE;
3169                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3170                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3171         }
3172
3173         err = tg3_load_firmware_cpu(tp, cpu_base,
3174                                     cpu_scratch_base, cpu_scratch_size,
3175                                     &info);
3176         if (err)
3177                 return err;
3178
3179         /* Now start up the CPU. */
3180         tw32(cpu_base + CPU_STATE, 0xffffffff);
3181         tw32_f(cpu_base + CPU_PC, info.fw_base);
3182
3183         for (i = 0; i < 5; i++) {
3184                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3185                         break;
3186                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3187                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3188                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3189                 udelay(1000);
3190         }
3191         if (i >= 5) {
3192                 netdev_err(tp->dev,
3193                            "%s failed to set CPU PC, is %08x should be %08x\n",
3194                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3195                 return -ENODEV;
3196         }
3197         tw32(cpu_base + CPU_STATE, 0xffffffff);
3198         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3199         return 0;
3200 }
3201
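/* Program the station address into all four MAC address slots as a
 * 16-bit high half and a 32-bit low half.  skip_mac_1 leaves slot 1
 * untouched (e.g. when ASF management firmware is using it).  5703 and
 * 5704 parts mirror the address into twelve extended slots as well,
 * and the sum of the address bytes seeds the TX backoff algorithm.
 */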
3203 /* tp->lock is held. */
3204 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3205 {
3206         u32 addr_high, addr_low;
3207         int i;
3208
3209         addr_high = ((tp->dev->dev_addr[0] << 8) |
3210                      tp->dev->dev_addr[1]);
3211         addr_low = ((tp->dev->dev_addr[2] << 24) |
3212                     (tp->dev->dev_addr[3] << 16) |
3213                     (tp->dev->dev_addr[4] <<  8) |
3214                     (tp->dev->dev_addr[5] <<  0));
3215         for (i = 0; i < 4; i++) {
3216                 if (i == 1 && skip_mac_1)
3217                         continue;
3218                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3219                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3220         }
3221
3222         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3223             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3224                 for (i = 0; i < 12; i++) {
3225                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3226                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3227                 }
3228         }
3229
3230         addr_high = (tp->dev->dev_addr[0] +
3231                      tp->dev->dev_addr[1] +
3232                      tp->dev->dev_addr[2] +
3233                      tp->dev->dev_addr[3] +
3234                      tp->dev->dev_addr[4] +
3235                      tp->dev->dev_addr[5]) &
3236                 TX_BACKOFF_SEED_MASK;
3237         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3238 }
3239
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242         /*
3243          * Make sure register accesses (indirect or otherwise) will function
3244          * correctly.
3245          */
3246         pci_write_config_dword(tp->pdev,
3247                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252         int err;
3253
3254         tg3_enable_register_access(tp);
3255
3256         err = pci_set_power_state(tp->pdev, PCI_D0);
3257         if (!err) {
3258                 /* Switch out of Vaux if it is a NIC */
3259                 tg3_pwrsrc_switch_to_vmain(tp);
3260         } else {
3261                 netdev_err(tp->dev, "Transition to D0 failed\n");
3262         }
3263
3264         return err;
3265 }
3266
3267 static int tg3_power_down_prepare(struct tg3 *tp)
3268 {
3269         u32 misc_host_ctrl;
3270         bool device_should_wake, do_low_power;
3271
3272         tg3_enable_register_access(tp);
3273
3274         /* Restore the CLKREQ setting. */
3275         if (tg3_flag(tp, CLKREQ_BUG)) {
3276                 u16 lnkctl;
3277
3278                 pci_read_config_word(tp->pdev,
3279                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3280                                      &lnkctl);
3281                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3282                 pci_write_config_word(tp->pdev,
3283                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3284                                       lnkctl);
3285         }
3286
3287         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3288         tw32(TG3PCI_MISC_HOST_CTRL,
3289              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3290
3291         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3292                              tg3_flag(tp, WOL_ENABLE);
3293
3294         if (tg3_flag(tp, USE_PHYLIB)) {
3295                 do_low_power = false;
3296                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3297                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3298                         struct phy_device *phydev;
3299                         u32 phyid, advertising;
3300
3301                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3302
3303                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3304
3305                         tp->link_config.orig_speed = phydev->speed;
3306                         tp->link_config.orig_duplex = phydev->duplex;
3307                         tp->link_config.orig_autoneg = phydev->autoneg;
3308                         tp->link_config.orig_advertising = phydev->advertising;
3309
3310                         advertising = ADVERTISED_TP |
3311                                       ADVERTISED_Pause |
3312                                       ADVERTISED_Autoneg |
3313                                       ADVERTISED_10baseT_Half;
3314
3315                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3316                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3317                                         advertising |=
3318                                                 ADVERTISED_100baseT_Half |
3319                                                 ADVERTISED_100baseT_Full |
3320                                                 ADVERTISED_10baseT_Full;
3321                                 else
3322                                         advertising |= ADVERTISED_10baseT_Full;
3323                         }
3324
3325                         phydev->advertising = advertising;
3326
3327                         phy_start_aneg(phydev);
3328
3329                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3330                         if (phyid != PHY_ID_BCMAC131) {
3331                                 phyid &= PHY_BCM_OUI_MASK;
3332                                 if (phyid == PHY_BCM_OUI_1 ||
3333                                     phyid == PHY_BCM_OUI_2 ||
3334                                     phyid == PHY_BCM_OUI_3)
3335                                         do_low_power = true;
3336                         }
3337                 }
3338         } else {
3339                 do_low_power = true;
3340
3341                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3342                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3343                         tp->link_config.orig_speed = tp->link_config.speed;
3344                         tp->link_config.orig_duplex = tp->link_config.duplex;
3345                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3346                 }
3347
3348                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3349                         tp->link_config.speed = SPEED_10;
3350                         tp->link_config.duplex = DUPLEX_HALF;
3351                         tp->link_config.autoneg = AUTONEG_ENABLE;
3352                         tg3_setup_phy(tp, 0);
3353                 }
3354         }
3355
3356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3357                 u32 val;
3358
3359                 val = tr32(GRC_VCPU_EXT_CTRL);
3360                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3361         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3362                 int i;
3363                 u32 val;
3364
3365                 for (i = 0; i < 200; i++) {
3366                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3367                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3368                                 break;
3369                         msleep(1);
3370                 }
3371         }
3372         if (tg3_flag(tp, WOL_CAP))
3373                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3374                                                      WOL_DRV_STATE_SHUTDOWN |
3375                                                      WOL_DRV_WOL |
3376                                                      WOL_SET_MAGIC_PKT);
3377
3378         if (device_should_wake) {
3379                 u32 mac_mode;
3380
3381                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3382                         if (do_low_power &&
3383                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3384                                 tg3_phy_auxctl_write(tp,
3385                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3386                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3387                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3388                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3389                                 udelay(40);
3390                         }
3391
3392                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3393                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3394                         else
3395                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3396
3397                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3398                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3399                             ASIC_REV_5700) {
3400                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3401                                              SPEED_100 : SPEED_10;
3402                                 if (tg3_5700_link_polarity(tp, speed))
3403                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3404                                 else
3405                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3406                         }
3407                 } else {
3408                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3409                 }
3410
3411                 if (!tg3_flag(tp, 5750_PLUS))
3412                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3413
3414                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3415                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3416                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3417                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3418
3419                 if (tg3_flag(tp, ENABLE_APE))
3420                         mac_mode |= MAC_MODE_APE_TX_EN |
3421                                     MAC_MODE_APE_RX_EN |
3422                                     MAC_MODE_TDE_ENABLE;
3423
3424                 tw32_f(MAC_MODE, mac_mode);
3425                 udelay(100);
3426
3427                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3428                 udelay(10);
3429         }
3430
3431         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3432             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3433              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3434                 u32 base_val;
3435
3436                 base_val = tp->pci_clock_ctrl;
3437                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3438                              CLOCK_CTRL_TXCLK_DISABLE);
3439
3440                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3441                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3442         } else if (tg3_flag(tp, 5780_CLASS) ||
3443                    tg3_flag(tp, CPMU_PRESENT) ||
3444                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3445                 /* do nothing */
3446         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3447                 u32 newbits1, newbits2;
3448
3449                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3452                                     CLOCK_CTRL_TXCLK_DISABLE |
3453                                     CLOCK_CTRL_ALTCLK);
3454                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3455                 } else if (tg3_flag(tp, 5705_PLUS)) {
3456                         newbits1 = CLOCK_CTRL_625_CORE;
3457                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3458                 } else {
3459                         newbits1 = CLOCK_CTRL_ALTCLK;
3460                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3461                 }
3462
3463                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3464                             40);
3465
3466                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3467                             40);
3468
3469                 if (!tg3_flag(tp, 5705_PLUS)) {
3470                         u32 newbits3;
3471
3472                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3475                                             CLOCK_CTRL_TXCLK_DISABLE |
3476                                             CLOCK_CTRL_44MHZ_CORE);
3477                         } else {
3478                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3479                         }
3480
3481                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3482                                     tp->pci_clock_ctrl | newbits3, 40);
3483                 }
3484         }
3485
3486         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3487                 tg3_power_down_phy(tp, do_low_power);
3488
3489         tg3_frob_aux_power(tp, true);
3490
3491         /* Workaround for unstable PLL clock */
3492         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3493             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3494                 u32 val = tr32(0x7d00);
3495
3496                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3497                 tw32(0x7d00, val);
3498                 if (!tg3_flag(tp, ENABLE_ASF)) {
3499                         int err;
3500
3501                         err = tg3_nvram_lock(tp);
3502                         tg3_halt_cpu(tp, RX_CPU_BASE);
3503                         if (!err)
3504                                 tg3_nvram_unlock(tp);
3505                 }
3506         }
3507
3508         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3509
3510         return 0;
3511 }
3512
3513 static void tg3_power_down(struct tg3 *tp)
3514 {
3515         tg3_power_down_prepare(tp);
3516
3517         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3518         pci_set_power_state(tp->pdev, PCI_D3hot);
3519 }
3520
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                          u16 *speed, u8 *duplex)
3522 {
3523         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524         case MII_TG3_AUX_STAT_10HALF:
3525                 *speed = SPEED_10;
3526                 *duplex = DUPLEX_HALF;
3527                 break;
3528
3529         case MII_TG3_AUX_STAT_10FULL:
3530                 *speed = SPEED_10;
3531                 *duplex = DUPLEX_FULL;
3532                 break;
3533
3534         case MII_TG3_AUX_STAT_100HALF:
3535                 *speed = SPEED_100;
3536                 *duplex = DUPLEX_HALF;
3537                 break;
3538
3539         case MII_TG3_AUX_STAT_100FULL:
3540                 *speed = SPEED_100;
3541                 *duplex = DUPLEX_FULL;
3542                 break;
3543
3544         case MII_TG3_AUX_STAT_1000HALF:
3545                 *speed = SPEED_1000;
3546                 *duplex = DUPLEX_HALF;
3547                 break;
3548
3549         case MII_TG3_AUX_STAT_1000FULL:
3550                 *speed = SPEED_1000;
3551                 *duplex = DUPLEX_FULL;
3552                 break;
3553
3554         default:
3555                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557                                  SPEED_10;
3558                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559                                   DUPLEX_HALF;
3560                         break;
3561                 }
3562                 *speed = SPEED_INVALID;
3563                 *duplex = DUPLEX_INVALID;
3564                 break;
3565         }
3566 }
3567
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570         int err = 0;
3571         u32 val, new_adv;
3572
3573         new_adv = ADVERTISE_CSMA;
3574         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575         new_adv |= mii_advertise_flowctrl(flowctrl);
3576
3577         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578         if (err)
3579                 goto done;
3580
3581         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583
3584                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587
3588                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589                 if (err)
3590                         goto done;
3591         }
3592
3593         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594                 goto done;
3595
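        /* Clear the LPI enable bit while the EEE advertisement is being
         * reprogrammed.
         */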
3596         tw32(TG3_CPMU_EEE_MODE,
3597              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598
3599         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600         if (!err) {
3601                 int err2;
3602
3603                 val = 0;
3604                 /* Advertise 100-BaseTX EEE ability */
3605                 if (advertise & ADVERTISED_100baseT_Full)
3606                         val |= MDIO_AN_EEE_ADV_100TX;
3607                 /* Advertise 1000-BaseT EEE ability */
3608                 if (advertise & ADVERTISED_1000baseT_Full)
3609                         val |= MDIO_AN_EEE_ADV_1000T;
3610                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611                 if (err)
3612                         val = 0;
3613
3614                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615                 case ASIC_REV_5717:
3616                 case ASIC_REV_57765:
3617                 case ASIC_REV_57766:
3618                 case ASIC_REV_5719:
3619                         /* If any EEE modes were advertised above... */
3620                         if (val)
3621                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3622                                       MII_TG3_DSP_TAP26_RMRXSTO |
3623                                       MII_TG3_DSP_TAP26_OPCSINPT;
3624                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625                         /* Fall through */
3626                 case ASIC_REV_5720:
3627                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3630                 }
3631
3632                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633                 if (!err)
3634                         err = err2;
3635         }
3636
3637 done:
3638         return err;
3639 }
3640
3641 static void tg3_phy_copper_begin(struct tg3 *tp)
3642 {
3643         u32 new_adv;
3644         int i;
3645
3646         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3647                 new_adv = ADVERTISED_10baseT_Half |
3648                           ADVERTISED_10baseT_Full;
3649                 if (tg3_flag(tp, WOL_SPEED_100MB))
3650                         new_adv |= ADVERTISED_100baseT_Half |
3651                                    ADVERTISED_100baseT_Full;
3652
3653                 tg3_phy_autoneg_cfg(tp, new_adv,
3654                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3655         } else if (tp->link_config.speed == SPEED_INVALID) {
3656                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3657                         tp->link_config.advertising &=
3658                                 ~(ADVERTISED_1000baseT_Half |
3659                                   ADVERTISED_1000baseT_Full);
3660
3661                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3662                                     tp->link_config.flowctrl);
3663         } else {
3664                 /* Asking for a specific link mode. */
3665                 if (tp->link_config.speed == SPEED_1000) {
3666                         if (tp->link_config.duplex == DUPLEX_FULL)
3667                                 new_adv = ADVERTISED_1000baseT_Full;
3668                         else
3669                                 new_adv = ADVERTISED_1000baseT_Half;
3670                 } else if (tp->link_config.speed == SPEED_100) {
3671                         if (tp->link_config.duplex == DUPLEX_FULL)
3672                                 new_adv = ADVERTISED_100baseT_Full;
3673                         else
3674                                 new_adv = ADVERTISED_100baseT_Half;
3675                 } else {
3676                         if (tp->link_config.duplex == DUPLEX_FULL)
3677                                 new_adv = ADVERTISED_10baseT_Full;
3678                         else
3679                                 new_adv = ADVERTISED_10baseT_Half;
3680                 }
3681
3682                 tg3_phy_autoneg_cfg(tp, new_adv,
3683                                     tp->link_config.flowctrl);
3684         }
3685
3686         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3687             tp->link_config.speed != SPEED_INVALID) {
3688                 u32 bmcr, orig_bmcr;
3689
3690                 tp->link_config.active_speed = tp->link_config.speed;
3691                 tp->link_config.active_duplex = tp->link_config.duplex;
3692
3693                 bmcr = 0;
3694                 switch (tp->link_config.speed) {
3695                 default:
3696                 case SPEED_10:
3697                         break;
3698
3699                 case SPEED_100:
3700                         bmcr |= BMCR_SPEED100;
3701                         break;
3702
3703                 case SPEED_1000:
3704                         bmcr |= BMCR_SPEED1000;
3705                         break;
3706                 }
3707
3708                 if (tp->link_config.duplex == DUPLEX_FULL)
3709                         bmcr |= BMCR_FULLDPLX;
3710
3711                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3712                     (bmcr != orig_bmcr)) {
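                        /* Force the link down by putting the PHY in
                         * loopback, and wait for the link status to
                         * clear before applying the new settings.
                         */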
3713                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3714                         for (i = 0; i < 1500; i++) {
3715                                 u32 tmp;
3716
3717                                 udelay(10);
3718                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3719                                     tg3_readphy(tp, MII_BMSR, &tmp))
3720                                         continue;
3721                                 if (!(tmp & BMSR_LSTATUS)) {
3722                                         udelay(40);
3723                                         break;
3724                                 }
3725                         }
3726                         tg3_writephy(tp, MII_BMCR, bmcr);
3727                         udelay(40);
3728                 }
3729         } else {
3730                 tg3_writephy(tp, MII_BMCR,
3731                              BMCR_ANENABLE | BMCR_ANRESTART);
3732         }
3733 }
3734
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737         int err;
3738
3739         /* Turn off tap power management and set the extended
3740          * packet length bit. */
3741         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742
3743         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748
3749         udelay(40);
3750
3751         return err;
3752 }
3753
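/* Return true if the PHY's advertisement registers already match what
 * we intend to advertise, including the flow-control bits when running
 * full duplex and, on gigabit-capable parts, the 1000BASE-T control
 * bits.
 */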
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756         u32 advmsk, tgtadv, advertising;
3757
3758         advertising = tp->link_config.advertising;
3759         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760
3761         advmsk = ADVERTISE_ALL;
3762         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765         }
3766
3767         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768                 return false;
3769
3770         if ((*lcladv & advmsk) != tgtadv)
3771                 return false;
3772
3773         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774                 u32 tg3_ctrl;
3775
3776                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777
3778                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779                         return false;
3780
3781                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782                 if (tg3_ctrl != tgtadv)
3783                         return false;
3784         }
3785
3786         return true;
3787 }
3788
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791         u32 lpeth = 0;
3792
3793         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794                 u32 val;
3795
3796                 if (tg3_readphy(tp, MII_STAT1000, &val))
3797                         return false;
3798
3799                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800         }
3801
3802         if (tg3_readphy(tp, MII_LPA, rmtadv))
3803                 return false;
3804
3805         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806         tp->link_config.rmt_adv = lpeth;
3807
3808         return true;
3809 }
3810
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813         int current_link_up;
3814         u32 bmsr, val;
3815         u32 lcl_adv, rmt_adv;
3816         u16 current_speed;
3817         u8 current_duplex;
3818         int i, err;
3819
3820         tw32(MAC_EVENT, 0);
3821
3822         tw32_f(MAC_STATUS,
3823              (MAC_STATUS_SYNC_CHANGED |
3824               MAC_STATUS_CFG_CHANGED |
3825               MAC_STATUS_MI_COMPLETION |
3826               MAC_STATUS_LNKSTATE_CHANGED));
3827         udelay(40);
3828
3829         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830                 tw32_f(MAC_MI_MODE,
3831                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832                 udelay(80);
3833         }
3834
3835         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836
3837         /* Some third-party PHYs need to be reset on link going
3838          * down.
3839          */
3840         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843             netif_carrier_ok(tp->dev)) {
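                /* BMSR's link-status bit latches low; read it twice so
                 * the second read reflects the current link state.
                 */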
3844                 tg3_readphy(tp, MII_BMSR, &bmsr);
3845                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846                     !(bmsr & BMSR_LSTATUS))
3847                         force_reset = 1;
3848         }
3849         if (force_reset)
3850                 tg3_phy_reset(tp);
3851
3852         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853                 tg3_readphy(tp, MII_BMSR, &bmsr);
3854                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855                     !tg3_flag(tp, INIT_COMPLETE))
3856                         bmsr = 0;
3857
3858                 if (!(bmsr & BMSR_LSTATUS)) {
3859                         err = tg3_init_5401phy_dsp(tp);
3860                         if (err)
3861                                 return err;
3862
3863                         tg3_readphy(tp, MII_BMSR, &bmsr);
3864                         for (i = 0; i < 1000; i++) {
3865                                 udelay(10);
3866                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867                                     (bmsr & BMSR_LSTATUS)) {
3868                                         udelay(40);
3869                                         break;
3870                                 }
3871                         }
3872
3873                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874                             TG3_PHY_REV_BCM5401_B0 &&
3875                             !(bmsr & BMSR_LSTATUS) &&
3876                             tp->link_config.active_speed == SPEED_1000) {
3877                                 err = tg3_phy_reset(tp);
3878                                 if (!err)
3879                                         err = tg3_init_5401phy_dsp(tp);
3880                                 if (err)
3881                                         return err;
3882                         }
3883                 }
3884         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886                 /* 5701 {A0,B0} CRC bug workaround */
3887                 tg3_writephy(tp, 0x15, 0x0a75);
3888                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891         }
3892
3893         /* Clear pending interrupts... */
3894         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896
3897         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901
3902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907                 else
3908                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909         }
3910
3911         current_link_up = 0;
3912         current_speed = SPEED_INVALID;
3913         current_duplex = DUPLEX_INVALID;
3914         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915         tp->link_config.rmt_adv = 0;
3916
3917         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918                 err = tg3_phy_auxctl_read(tp,
3919                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920                                           &val);
3921                 if (!err && !(val & (1 << 10))) {
3922                         tg3_phy_auxctl_write(tp,
3923                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924                                              val | (1 << 10));
3925                         goto relink;
3926                 }
3927         }
3928
3929         bmsr = 0;
3930         for (i = 0; i < 100; i++) {
3931                 tg3_readphy(tp, MII_BMSR, &bmsr);
3932                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933                     (bmsr & BMSR_LSTATUS))
3934                         break;
3935                 udelay(40);
3936         }
3937
3938         if (bmsr & BMSR_LSTATUS) {
3939                 u32 aux_stat, bmcr;
3940
3941                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942                 for (i = 0; i < 2000; i++) {
3943                         udelay(10);
3944                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945                             aux_stat)
3946                                 break;
3947                 }
3948
3949                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950                                              &current_speed,
3951                                              &current_duplex);
3952
3953                 bmcr = 0;
3954                 for (i = 0; i < 200; i++) {
3955                         tg3_readphy(tp, MII_BMCR, &bmcr);
3956                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957                                 continue;
3958                         if (bmcr && bmcr != 0x7fff)
3959                                 break;
3960                         udelay(10);
3961                 }
3962
3963                 lcl_adv = 0;
3964                 rmt_adv = 0;
3965
3966                 tp->link_config.active_speed = current_speed;
3967                 tp->link_config.active_duplex = current_duplex;
3968
3969                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970                         if ((bmcr & BMCR_ANENABLE) &&
3971                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973                                 current_link_up = 1;
3974                 } else {
3975                         if (!(bmcr & BMCR_ANENABLE) &&
3976                             tp->link_config.speed == current_speed &&
3977                             tp->link_config.duplex == current_duplex &&
3978                             tp->link_config.flowctrl ==
3979                             tp->link_config.active_flowctrl) {
3980                                 current_link_up = 1;
3981                         }
3982                 }
3983
3984                 if (current_link_up == 1 &&
3985                     tp->link_config.active_duplex == DUPLEX_FULL) {
3986                         u32 reg, bit;
3987
3988                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989                                 reg = MII_TG3_FET_GEN_STAT;
3990                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991                         } else {
3992                                 reg = MII_TG3_EXT_STAT;
3993                                 bit = MII_TG3_EXT_STAT_MDIX;
3994                         }
3995
3996                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998
3999                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000                 }
4001         }
4002
4003 relink:
4004         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005                 tg3_phy_copper_begin(tp);
4006
4007                 tg3_readphy(tp, MII_BMSR, &bmsr);
4008                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010                         current_link_up = 1;
4011         }
4012
4013         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014         if (current_link_up == 1) {
4015                 if (tp->link_config.active_speed == SPEED_100 ||
4016                     tp->link_config.active_speed == SPEED_10)
4017                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018                 else
4019                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022         else
4023                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024
4025         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026         if (tp->link_config.active_duplex == DUPLEX_HALF)
4027                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028
4029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030                 if (current_link_up == 1 &&
4031                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033                 else
4034                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035         }
4036
4037         /* For reasons not fully understood, the Netgear GA302T PHY
4038          * does not send or receive packets without MI auto-polling.
4039          */
4040         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4044                 udelay(80);
4045         }
4046
4047         tw32_f(MAC_MODE, tp->mac_mode);
4048         udelay(40);
4049
4050         tg3_phy_eee_adjust(tp, current_link_up);
4051
4052         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053                 /* Polled via timer. */
4054                 tw32_f(MAC_EVENT, 0);
4055         } else {
4056                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057         }
4058         udelay(40);
4059
4060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061             current_link_up == 1 &&
4062             tp->link_config.active_speed == SPEED_1000 &&
4063             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064                 udelay(120);
4065                 tw32_f(MAC_STATUS,
4066                      (MAC_STATUS_SYNC_CHANGED |
4067                       MAC_STATUS_CFG_CHANGED));
4068                 udelay(40);
4069                 tg3_write_mem(tp,
4070                               NIC_SRAM_FIRMWARE_MBOX,
4071                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072         }
4073
4074         /* Prevent send BD corruption: disable CLKREQ at 10/100 link speeds. */
4075         if (tg3_flag(tp, CLKREQ_BUG)) {
4076                 u16 oldlnkctl, newlnkctl;
4077
4078                 pci_read_config_word(tp->pdev,
4079                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080                                      &oldlnkctl);
4081                 if (tp->link_config.active_speed == SPEED_100 ||
4082                     tp->link_config.active_speed == SPEED_10)
4083                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084                 else
4085                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086                 if (newlnkctl != oldlnkctl)
4087                         pci_write_config_word(tp->pdev,
4088                                               pci_pcie_cap(tp->pdev) +
4089                                               PCI_EXP_LNKCTL, newlnkctl);
4090         }
4091
4092         if (current_link_up != netif_carrier_ok(tp->dev)) {
4093                 if (current_link_up)
4094                         netif_carrier_on(tp->dev);
4095                 else
4096                         netif_carrier_off(tp->dev);
4097                 tg3_link_report(tp);
4098         }
4099
4100         return 0;
4101 }
4102
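/* Software state machine for 1000BASE-X autonegotiation, modeled on
 * the arbitration state diagram of IEEE 802.3 clause 37.
 */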
4103 struct tg3_fiber_aneginfo {
4104         int state;
4105 #define ANEG_STATE_UNKNOWN              0
4106 #define ANEG_STATE_AN_ENABLE            1
4107 #define ANEG_STATE_RESTART_INIT         2
4108 #define ANEG_STATE_RESTART              3
4109 #define ANEG_STATE_DISABLE_LINK_OK      4
4110 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4111 #define ANEG_STATE_ABILITY_DETECT       6
4112 #define ANEG_STATE_ACK_DETECT_INIT      7
4113 #define ANEG_STATE_ACK_DETECT           8
4114 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4115 #define ANEG_STATE_COMPLETE_ACK         10
4116 #define ANEG_STATE_IDLE_DETECT_INIT     11
4117 #define ANEG_STATE_IDLE_DETECT          12
4118 #define ANEG_STATE_LINK_OK              13
4119 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4120 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4121
4122         u32 flags;
4123 #define MR_AN_ENABLE            0x00000001
4124 #define MR_RESTART_AN           0x00000002
4125 #define MR_AN_COMPLETE          0x00000004
4126 #define MR_PAGE_RX              0x00000008
4127 #define MR_NP_LOADED            0x00000010
4128 #define MR_TOGGLE_TX            0x00000020
4129 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4130 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4131 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4132 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4133 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4134 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4135 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4136 #define MR_TOGGLE_RX            0x00002000
4137 #define MR_NP_RX                0x00004000
4138
4139 #define MR_LINK_OK              0x80000000
4140
4141         unsigned long link_time, cur_time;
4142
4143         u32 ability_match_cfg;
4144         int ability_match_count;
4145
4146         char ability_match, idle_match, ack_match;
4147
4148         u32 txconfig, rxconfig;
4149 #define ANEG_CFG_NP             0x00000080
4150 #define ANEG_CFG_ACK            0x00000040
4151 #define ANEG_CFG_RF2            0x00000020
4152 #define ANEG_CFG_RF1            0x00000010
4153 #define ANEG_CFG_PS2            0x00000001
4154 #define ANEG_CFG_PS1            0x00008000
4155 #define ANEG_CFG_HD             0x00004000
4156 #define ANEG_CFG_FD             0x00002000
4157 #define ANEG_CFG_INVAL          0x00001f06
4158
4159 };
4160 #define ANEG_OK         0
4161 #define ANEG_DONE       1
4162 #define ANEG_TIMER_ENAB 2
4163 #define ANEG_FAILED     -1
4164
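/* Settle time is measured in state-machine ticks: ap->cur_time is
 * incremented once per invocation of tg3_fiber_aneg_smachine().
 */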
4165 #define ANEG_STATE_SETTLE_TIME  10000
4166
4167 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4168                                    struct tg3_fiber_aneginfo *ap)
4169 {
4170         u16 flowctrl;
4171         unsigned long delta;
4172         u32 rx_cfg_reg;
4173         int ret;
4174
4175         if (ap->state == ANEG_STATE_UNKNOWN) {
4176                 ap->rxconfig = 0;
4177                 ap->link_time = 0;
4178                 ap->cur_time = 0;
4179                 ap->ability_match_cfg = 0;
4180                 ap->ability_match_count = 0;
4181                 ap->ability_match = 0;
4182                 ap->idle_match = 0;
4183                 ap->ack_match = 0;
4184         }
4185         ap->cur_time++;
4186
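        /* Track the incoming config word: receiving the same word twice
         * in a row counts as an ability match, a word with the ACK bit
         * sets ack_match, and no incoming config at all marks the link
         * idle.
         */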
4187         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4188                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4189
4190                 if (rx_cfg_reg != ap->ability_match_cfg) {
4191                         ap->ability_match_cfg = rx_cfg_reg;
4192                         ap->ability_match = 0;
4193                         ap->ability_match_count = 0;
4194                 } else {
4195                         if (++ap->ability_match_count > 1) {
4196                                 ap->ability_match = 1;
4197                                 ap->ability_match_cfg = rx_cfg_reg;
4198                         }
4199                 }
4200                 if (rx_cfg_reg & ANEG_CFG_ACK)
4201                         ap->ack_match = 1;
4202                 else
4203                         ap->ack_match = 0;
4204
4205                 ap->idle_match = 0;
4206         } else {
4207                 ap->idle_match = 1;
4208                 ap->ability_match_cfg = 0;
4209                 ap->ability_match_count = 0;
4210                 ap->ability_match = 0;
4211                 ap->ack_match = 0;
4212
4213                 rx_cfg_reg = 0;
4214         }
4215
4216         ap->rxconfig = rx_cfg_reg;
4217         ret = ANEG_OK;
4218
4219         switch (ap->state) {
4220         case ANEG_STATE_UNKNOWN:
4221                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4222                         ap->state = ANEG_STATE_AN_ENABLE;
4223
4224                 /* fallthru */
4225         case ANEG_STATE_AN_ENABLE:
4226                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4227                 if (ap->flags & MR_AN_ENABLE) {
4228                         ap->link_time = 0;
4229                         ap->cur_time = 0;
4230                         ap->ability_match_cfg = 0;
4231                         ap->ability_match_count = 0;
4232                         ap->ability_match = 0;
4233                         ap->idle_match = 0;
4234                         ap->ack_match = 0;
4235
4236                         ap->state = ANEG_STATE_RESTART_INIT;
4237                 } else {
4238                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4239                 }
4240                 break;
4241
4242         case ANEG_STATE_RESTART_INIT:
4243                 ap->link_time = ap->cur_time;
4244                 ap->flags &= ~(MR_NP_LOADED);
4245                 ap->txconfig = 0;
4246                 tw32(MAC_TX_AUTO_NEG, 0);
4247                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4248                 tw32_f(MAC_MODE, tp->mac_mode);
4249                 udelay(40);
4250
4251                 ret = ANEG_TIMER_ENAB;
4252                 ap->state = ANEG_STATE_RESTART;
4253
4254                 /* fallthru */
4255         case ANEG_STATE_RESTART:
4256                 delta = ap->cur_time - ap->link_time;
4257                 if (delta > ANEG_STATE_SETTLE_TIME)
4258                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4259                 else
4260                         ret = ANEG_TIMER_ENAB;
4261                 break;
4262
4263         case ANEG_STATE_DISABLE_LINK_OK:
4264                 ret = ANEG_DONE;
4265                 break;
4266
4267         case ANEG_STATE_ABILITY_DETECT_INIT:
4268                 ap->flags &= ~(MR_TOGGLE_TX);
4269                 ap->txconfig = ANEG_CFG_FD;
4270                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4271                 if (flowctrl & ADVERTISE_1000XPAUSE)
4272                         ap->txconfig |= ANEG_CFG_PS1;
4273                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4274                         ap->txconfig |= ANEG_CFG_PS2;
4275                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4276                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4277                 tw32_f(MAC_MODE, tp->mac_mode);
4278                 udelay(40);
4279
4280                 ap->state = ANEG_STATE_ABILITY_DETECT;
4281                 break;
4282
4283         case ANEG_STATE_ABILITY_DETECT:
4284                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4285                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4286                 break;
4287
4288         case ANEG_STATE_ACK_DETECT_INIT:
4289                 ap->txconfig |= ANEG_CFG_ACK;
4290                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4291                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4292                 tw32_f(MAC_MODE, tp->mac_mode);
4293                 udelay(40);
4294
4295                 ap->state = ANEG_STATE_ACK_DETECT;
4296
4297                 /* fallthru */
4298         case ANEG_STATE_ACK_DETECT:
4299                 if (ap->ack_match != 0) {
4300                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4301                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4302                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4303                         } else {
4304                                 ap->state = ANEG_STATE_AN_ENABLE;
4305                         }
4306                 } else if (ap->ability_match != 0 &&
4307                            ap->rxconfig == 0) {
4308                         ap->state = ANEG_STATE_AN_ENABLE;
4309                 }
4310                 break;
4311
4312         case ANEG_STATE_COMPLETE_ACK_INIT:
4313                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4314                         ret = ANEG_FAILED;
4315                         break;
4316                 }
4317                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4318                                MR_LP_ADV_HALF_DUPLEX |
4319                                MR_LP_ADV_SYM_PAUSE |
4320                                MR_LP_ADV_ASYM_PAUSE |
4321                                MR_LP_ADV_REMOTE_FAULT1 |
4322                                MR_LP_ADV_REMOTE_FAULT2 |
4323                                MR_LP_ADV_NEXT_PAGE |
4324                                MR_TOGGLE_RX |
4325                                MR_NP_RX);
4326                 if (ap->rxconfig & ANEG_CFG_FD)
4327                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4328                 if (ap->rxconfig & ANEG_CFG_HD)
4329                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4330                 if (ap->rxconfig & ANEG_CFG_PS1)
4331                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4332                 if (ap->rxconfig & ANEG_CFG_PS2)
4333                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4334                 if (ap->rxconfig & ANEG_CFG_RF1)
4335                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4336                 if (ap->rxconfig & ANEG_CFG_RF2)
4337                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4338                 if (ap->rxconfig & ANEG_CFG_NP)
4339                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4340
4341                 ap->link_time = ap->cur_time;
4342
4343                 ap->flags ^= (MR_TOGGLE_TX);
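                     /* 0x0008 appears to be the next-page Toggle bit of the
                      * received config word (it feeds MR_TOGGLE_RX); no
                      * ANEG_CFG_* macro is defined for it.
                      */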
4344                 if (ap->rxconfig & 0x0008)
4345                         ap->flags |= MR_TOGGLE_RX;
4346                 if (ap->rxconfig & ANEG_CFG_NP)
4347                         ap->flags |= MR_NP_RX;
4348                 ap->flags |= MR_PAGE_RX;
4349
4350                 ap->state = ANEG_STATE_COMPLETE_ACK;
4351                 ret = ANEG_TIMER_ENAB;
4352                 break;
4353
4354         case ANEG_STATE_COMPLETE_ACK:
4355                 if (ap->ability_match != 0 &&
4356                     ap->rxconfig == 0) {
4357                         ap->state = ANEG_STATE_AN_ENABLE;
4358                         break;
4359                 }
4360                 delta = ap->cur_time - ap->link_time;
4361                 if (delta > ANEG_STATE_SETTLE_TIME) {
4362                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4363                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4364                         } else {
4365                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4366                                     !(ap->flags & MR_NP_RX)) {
4367                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4368                                 } else {
4369                                         ret = ANEG_FAILED;
4370                                 }
4371                         }
4372                 }
4373                 break;
4374
4375         case ANEG_STATE_IDLE_DETECT_INIT:
4376                 ap->link_time = ap->cur_time;
4377                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4378                 tw32_f(MAC_MODE, tp->mac_mode);
4379                 udelay(40);
4380
4381                 ap->state = ANEG_STATE_IDLE_DETECT;
4382                 ret = ANEG_TIMER_ENAB;
4383                 break;
4384
4385         case ANEG_STATE_IDLE_DETECT:
4386                 if (ap->ability_match != 0 &&
4387                     ap->rxconfig == 0) {
4388                         ap->state = ANEG_STATE_AN_ENABLE;
4389                         break;
4390                 }
4391                 delta = ap->cur_time - ap->link_time;
4392                 if (delta > ANEG_STATE_SETTLE_TIME) {
4393                         /* XXX another gem from the Broadcom driver :( */
4394                         ap->state = ANEG_STATE_LINK_OK;
4395                 }
4396                 break;
4397
4398         case ANEG_STATE_LINK_OK:
4399                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4400                 ret = ANEG_DONE;
4401                 break;
4402
4403         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4404                 /* ??? unimplemented */
4405                 break;
4406
4407         case ANEG_STATE_NEXT_PAGE_WAIT:
4408                 /* ??? unimplemented */
4409                 break;
4410
4411         default:
4412                 ret = ANEG_FAILED;
4413                 break;
4414         }
4415
4416         return ret;
4417 }
4418
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421         int res = 0;
4422         struct tg3_fiber_aneginfo aninfo;
4423         int status = ANEG_FAILED;
4424         unsigned int tick;
4425         u32 tmp;
4426
4427         tw32_f(MAC_TX_AUTO_NEG, 0);
4428
4429         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431         udelay(40);
4432
4433         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434         udelay(40);
4435
4436         memset(&aninfo, 0, sizeof(aninfo));
4437         aninfo.flags |= MR_AN_ENABLE;
4438         aninfo.state = ANEG_STATE_UNKNOWN;
4439         aninfo.cur_time = 0;
4440         tick = 0;
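             /* Crank the state machine for up to ~195 ms (195,000 ticks of
              * udelay(1)), or until it reports ANEG_DONE or ANEG_FAILED.
              */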
4441         while (++tick < 195000) {
4442                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443                 if (status == ANEG_DONE || status == ANEG_FAILED)
4444                         break;
4445
4446                 udelay(1);
4447         }
4448
4449         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450         tw32_f(MAC_MODE, tp->mac_mode);
4451         udelay(40);
4452
4453         *txflags = aninfo.txconfig;
4454         *rxflags = aninfo.flags;
4455
4456         if (status == ANEG_DONE &&
4457             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458                              MR_LP_ADV_FULL_DUPLEX)))
4459                 res = 1;
4460
4461         return res;
4462 }
4463
4464 static void tg3_init_bcm8002(struct tg3 *tp)
4465 {
4466         u32 mac_status = tr32(MAC_STATUS);
4467         int i;
4468
4469         /* Reset on first-time init or whenever we have a link. */
4470         if (tg3_flag(tp, INIT_COMPLETE) &&
4471             !(mac_status & MAC_STATUS_PCS_SYNCED))
4472                 return;
4473
4474         /* Set PLL lock range. */
4475         tg3_writephy(tp, 0x16, 0x8007);
4476
4477         /* SW reset */
4478         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4479
4480         /* Wait for reset to complete. */
4481         /* XXX schedule_timeout() ... */
4482         for (i = 0; i < 500; i++)
4483                 udelay(10);
4484
4485         /* Config mode; select PMA/Ch 1 regs. */
4486         tg3_writephy(tp, 0x10, 0x8411);
4487
4488         /* Enable auto-lock and comdet, select txclk for tx. */
4489         tg3_writephy(tp, 0x11, 0x0a10);
4490
4491         tg3_writephy(tp, 0x18, 0x00a0);
4492         tg3_writephy(tp, 0x16, 0x41ff);
4493
4494         /* Assert and deassert POR. */
4495         tg3_writephy(tp, 0x13, 0x0400);
4496         udelay(40);
4497         tg3_writephy(tp, 0x13, 0x0000);
4498
4499         tg3_writephy(tp, 0x11, 0x0a50);
4500         udelay(40);
4501         tg3_writephy(tp, 0x11, 0x0a10);
4502
4503         /* Wait for signal to stabilize */
4504         /* XXX schedule_timeout() ... */
4505         for (i = 0; i < 15000; i++)
4506                 udelay(10);
4507
4508         /* Deselect the channel register so we can read the PHYID
4509          * later.
4510          */
4511         tg3_writephy(tp, 0x10, 0x8011);
4512 }
4513
4514 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4515 {
4516         u16 flowctrl;
4517         u32 sg_dig_ctrl, sg_dig_status;
4518         u32 serdes_cfg, expected_sg_dig_ctrl;
4519         int workaround, port_a;
4520         int current_link_up;
4521
4522         serdes_cfg = 0;
4523         expected_sg_dig_ctrl = 0;
4524         workaround = 0;
4525         port_a = 1;
4526         current_link_up = 0;
4527
4528         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4529             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4530                 workaround = 1;
4531                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4532                         port_a = 0;
4533
4534                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4535                 /* preserve bits 20-23 for voltage regulator */
4536                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4537         }
4538
4539         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4540
4541         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4542                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4543                         if (workaround) {
4544                                 u32 val = serdes_cfg;
4545
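                                          /* Magic per-port SERDES override
                                           * values, apparently inherited from
                                           * the vendor reference code; no
                                           * symbolic names exist for them.
                                           */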
4546                                 if (port_a)
4547                                         val |= 0xc010000;
4548                                 else
4549                                         val |= 0x4010000;
4550                                 tw32_f(MAC_SERDES_CFG, val);
4551                         }
4552
4553                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4554                 }
4555                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4556                         tg3_setup_flow_control(tp, 0, 0);
4557                         current_link_up = 1;
4558                 }
4559                 goto out;
4560         }
4561
4562         /* Want auto-negotiation.  */
4563         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4564
4565         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4566         if (flowctrl & ADVERTISE_1000XPAUSE)
4567                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4568         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4569                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4570
4571         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4572                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4573                     tp->serdes_counter &&
4574                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4575                                     MAC_STATUS_RCVD_CFG)) ==
4576                      MAC_STATUS_PCS_SYNCED)) {
4577                         tp->serdes_counter--;
4578                         current_link_up = 1;
4579                         goto out;
4580                 }
4581 restart_autoneg:
4582                 if (workaround)
4583                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4584                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4585                 udelay(5);
4586                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4587
4588                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4589                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4590         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4591                                  MAC_STATUS_SIGNAL_DET)) {
4592                 sg_dig_status = tr32(SG_DIG_STATUS);
4593                 mac_status = tr32(MAC_STATUS);
4594
4595                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4596                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4597                         u32 local_adv = 0, remote_adv = 0;
4598
4599                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4600                                 local_adv |= ADVERTISE_1000XPAUSE;
4601                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4602                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4603
4604                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4605                                 remote_adv |= LPA_1000XPAUSE;
4606                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4607                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4608
4609                         tp->link_config.rmt_adv =
4610                                            mii_adv_to_ethtool_adv_x(remote_adv);
4611
4612                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4613                         current_link_up = 1;
4614                         tp->serdes_counter = 0;
4615                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4616                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4617                         if (tp->serdes_counter)
4618                                 tp->serdes_counter--;
4619                         else {
4620                                 if (workaround) {
4621                                         u32 val = serdes_cfg;
4622
4623                                         if (port_a)
4624                                                 val |= 0xc010000;
4625                                         else
4626                                                 val |= 0x4010000;
4627
4628                                         tw32_f(MAC_SERDES_CFG, val);
4629                                 }
4630
4631                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4632                                 udelay(40);
4633
4634                                 /* Link parallel detection: link is up only
4635                                  * if we have PCS_SYNC and are not
4636                                  * receiving config code words. */
4637                                 mac_status = tr32(MAC_STATUS);
4638                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4639                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4640                                         tg3_setup_flow_control(tp, 0, 0);
4641                                         current_link_up = 1;
4642                                         tp->phy_flags |=
4643                                                 TG3_PHYFLG_PARALLEL_DETECT;
4644                                         tp->serdes_counter =
4645                                                 SERDES_PARALLEL_DET_TIMEOUT;
4646                                 } else
4647                                         goto restart_autoneg;
4648                         }
4649                 }
4650         } else {
4651                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4652                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653         }
4654
4655 out:
4656         return current_link_up;
4657 }
4658
4659 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4660 {
4661         int current_link_up = 0;
4662
4663         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4664                 goto out;
4665
4666         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4667                 u32 txflags, rxflags;
4668                 int i;
4669
4670                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4671                         u32 local_adv = 0, remote_adv = 0;
4672
4673                         if (txflags & ANEG_CFG_PS1)
4674                                 local_adv |= ADVERTISE_1000XPAUSE;
4675                         if (txflags & ANEG_CFG_PS2)
4676                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4677
4678                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4679                                 remote_adv |= LPA_1000XPAUSE;
4680                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4681                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4682
4683                         tp->link_config.rmt_adv =
4684                                            mii_adv_to_ethtool_adv_x(remote_adv);
4685
4686                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4687
4688                         current_link_up = 1;
4689                 }
4690                 for (i = 0; i < 30; i++) {
4691                         udelay(20);
4692                         tw32_f(MAC_STATUS,
4693                                (MAC_STATUS_SYNC_CHANGED |
4694                                 MAC_STATUS_CFG_CHANGED));
4695                         udelay(40);
4696                         if ((tr32(MAC_STATUS) &
4697                              (MAC_STATUS_SYNC_CHANGED |
4698                               MAC_STATUS_CFG_CHANGED)) == 0)
4699                                 break;
4700                 }
4701
4702                 mac_status = tr32(MAC_STATUS);
4703                 if (current_link_up == 0 &&
4704                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4705                     !(mac_status & MAC_STATUS_RCVD_CFG))
4706                         current_link_up = 1;
4707         } else {
4708                 tg3_setup_flow_control(tp, 0, 0);
4709
4710                 /* Forcing 1000FD link up. */
4711                 current_link_up = 1;
4712
4713                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4714                 udelay(40);
4715
4716                 tw32_f(MAC_MODE, tp->mac_mode);
4717                 udelay(40);
4718         }
4719
4720 out:
4721         return current_link_up;
4722 }
4723
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726         u32 orig_pause_cfg;
4727         u16 orig_active_speed;
4728         u8 orig_active_duplex;
4729         u32 mac_status;
4730         int current_link_up;
4731         int i;
4732
4733         orig_pause_cfg = tp->link_config.active_flowctrl;
4734         orig_active_speed = tp->link_config.active_speed;
4735         orig_active_duplex = tp->link_config.active_duplex;
4736
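             /* Fast path: without HW autoneg, if the carrier is up, init
              * has completed, and the MAC still reports a clean synced
              * link, just ack the status bits and keep the current state.
              */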
4737         if (!tg3_flag(tp, HW_AUTONEG) &&
4738             netif_carrier_ok(tp->dev) &&
4739             tg3_flag(tp, INIT_COMPLETE)) {
4740                 mac_status = tr32(MAC_STATUS);
4741                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4742                                MAC_STATUS_SIGNAL_DET |
4743                                MAC_STATUS_CFG_CHANGED |
4744                                MAC_STATUS_RCVD_CFG);
4745                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746                                    MAC_STATUS_SIGNAL_DET)) {
4747                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748                                             MAC_STATUS_CFG_CHANGED));
4749                         return 0;
4750                 }
4751         }
4752
4753         tw32_f(MAC_TX_AUTO_NEG, 0);
4754
4755         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757         tw32_f(MAC_MODE, tp->mac_mode);
4758         udelay(40);
4759
4760         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761                 tg3_init_bcm8002(tp);
4762
4763         /* Enable link change event even when serdes polling.  */
4764         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765         udelay(40);
4766
4767         current_link_up = 0;
4768         tp->link_config.rmt_adv = 0;
4769         mac_status = tr32(MAC_STATUS);
4770
4771         if (tg3_flag(tp, HW_AUTONEG))
4772                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773         else
4774                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775
4776         tp->napi[0].hw_status->status =
4777                 (SD_STATUS_UPDATED |
4778                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779
4780         for (i = 0; i < 100; i++) {
4781                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782                                     MAC_STATUS_CFG_CHANGED));
4783                 udelay(5);
4784                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785                                          MAC_STATUS_CFG_CHANGED |
4786                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787                         break;
4788         }
4789
4790         mac_status = tr32(MAC_STATUS);
4791         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792                 current_link_up = 0;
4793                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794                     tp->serdes_counter == 0) {
4795                         tw32_f(MAC_MODE, (tp->mac_mode |
4796                                           MAC_MODE_SEND_CONFIGS));
4797                         udelay(1);
4798                         tw32_f(MAC_MODE, tp->mac_mode);
4799                 }
4800         }
4801
4802         if (current_link_up == 1) {
4803                 tp->link_config.active_speed = SPEED_1000;
4804                 tp->link_config.active_duplex = DUPLEX_FULL;
4805                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806                                     LED_CTRL_LNKLED_OVERRIDE |
4807                                     LED_CTRL_1000MBPS_ON));
4808         } else {
4809                 tp->link_config.active_speed = SPEED_INVALID;
4810                 tp->link_config.active_duplex = DUPLEX_INVALID;
4811                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812                                     LED_CTRL_LNKLED_OVERRIDE |
4813                                     LED_CTRL_TRAFFIC_OVERRIDE));
4814         }
4815
4816         if (current_link_up != netif_carrier_ok(tp->dev)) {
4817                 if (current_link_up)
4818                         netif_carrier_on(tp->dev);
4819                 else
4820                         netif_carrier_off(tp->dev);
4821                 tg3_link_report(tp);
4822         } else {
4823                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824                 if (orig_pause_cfg != now_pause_cfg ||
4825                     orig_active_speed != tp->link_config.active_speed ||
4826                     orig_active_duplex != tp->link_config.active_duplex)
4827                         tg3_link_report(tp);
4828         }
4829
4830         return 0;
4831 }
4832
4833 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4834 {
4835         int current_link_up, err = 0;
4836         u32 bmsr, bmcr;
4837         u16 current_speed;
4838         u8 current_duplex;
4839         u32 local_adv, remote_adv;
4840
4841         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842         tw32_f(MAC_MODE, tp->mac_mode);
4843         udelay(40);
4844
4845         tw32(MAC_EVENT, 0);
4846
4847         tw32_f(MAC_STATUS,
4848              (MAC_STATUS_SYNC_CHANGED |
4849               MAC_STATUS_CFG_CHANGED |
4850               MAC_STATUS_MI_COMPLETION |
4851               MAC_STATUS_LNKSTATE_CHANGED));
4852         udelay(40);
4853
4854         if (force_reset)
4855                 tg3_phy_reset(tp);
4856
4857         current_link_up = 0;
4858         current_speed = SPEED_INVALID;
4859         current_duplex = DUPLEX_INVALID;
4860         tp->link_config.rmt_adv = 0;
4861
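             /* BMSR latches link-down events; read it twice so the second
              * read reflects the current link state.
              */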
4862         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4863         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4865                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4866                         bmsr |= BMSR_LSTATUS;
4867                 else
4868                         bmsr &= ~BMSR_LSTATUS;
4869         }
4870
4871         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4872
4873         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4874             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4875                 /* do nothing, just check for link up at the end */
4876         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4877                 u32 adv, newadv;
4878
4879                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4880                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4881                                  ADVERTISE_1000XPAUSE |
4882                                  ADVERTISE_1000XPSE_ASYM |
4883                                  ADVERTISE_SLCT);
4884
4885                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4886                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4887
4888                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4889                         tg3_writephy(tp, MII_ADVERTISE, newadv);
4890                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4891                         tg3_writephy(tp, MII_BMCR, bmcr);
4892
4893                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4895                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896
4897                         return err;
4898                 }
4899         } else {
4900                 u32 new_bmcr;
4901
4902                 bmcr &= ~BMCR_SPEED1000;
4903                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4904
4905                 if (tp->link_config.duplex == DUPLEX_FULL)
4906                         new_bmcr |= BMCR_FULLDPLX;
4907
4908                 if (new_bmcr != bmcr) {
4909                         /* BMCR_SPEED1000 is a reserved bit that needs
4910                          * to be set on write.
4911                          */
4912                         new_bmcr |= BMCR_SPEED1000;
4913
4914                         /* Force a linkdown */
4915                         if (netif_carrier_ok(tp->dev)) {
4916                                 u32 adv;
4917
4918                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4919                                 adv &= ~(ADVERTISE_1000XFULL |
4920                                          ADVERTISE_1000XHALF |
4921                                          ADVERTISE_SLCT);
4922                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4923                                 tg3_writephy(tp, MII_BMCR, bmcr |
4924                                                            BMCR_ANRESTART |
4925                                                            BMCR_ANENABLE);
4926                                 udelay(10);
4927                                 netif_carrier_off(tp->dev);
4928                         }
4929                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4930                         bmcr = new_bmcr;
4931                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4932                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4933                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4934                             ASIC_REV_5714) {
4935                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4936                                         bmsr |= BMSR_LSTATUS;
4937                                 else
4938                                         bmsr &= ~BMSR_LSTATUS;
4939                         }
4940                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4941                 }
4942         }
4943
4944         if (bmsr & BMSR_LSTATUS) {
4945                 current_speed = SPEED_1000;
4946                 current_link_up = 1;
4947                 if (bmcr & BMCR_FULLDPLX)
4948                         current_duplex = DUPLEX_FULL;
4949                 else
4950                         current_duplex = DUPLEX_HALF;
4951
4952                 local_adv = 0;
4953                 remote_adv = 0;
4954
4955                 if (bmcr & BMCR_ANENABLE) {
4956                         u32 common;
4957
4958                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4959                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4960                         common = local_adv & remote_adv;
4961                         if (common & (ADVERTISE_1000XHALF |
4962                                       ADVERTISE_1000XFULL)) {
4963                                 if (common & ADVERTISE_1000XFULL)
4964                                         current_duplex = DUPLEX_FULL;
4965                                 else
4966                                         current_duplex = DUPLEX_HALF;
4967
4968                                 tp->link_config.rmt_adv =
4969                                            mii_adv_to_ethtool_adv_x(remote_adv);
4970                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4971                                 /* Link is up via parallel detect */
4972                         } else {
4973                                 current_link_up = 0;
4974                         }
4975                 }
4976         }
4977
4978         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4979                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4980
4981         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4982         if (tp->link_config.active_duplex == DUPLEX_HALF)
4983                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4984
4985         tw32_f(MAC_MODE, tp->mac_mode);
4986         udelay(40);
4987
4988         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4989
4990         tp->link_config.active_speed = current_speed;
4991         tp->link_config.active_duplex = current_duplex;
4992
4993         if (current_link_up != netif_carrier_ok(tp->dev)) {
4994                 if (current_link_up)
4995                         netif_carrier_on(tp->dev);
4996                 else {
4997                         netif_carrier_off(tp->dev);
4998                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999                 }
5000                 tg3_link_report(tp);
5001         }
5002         return err;
5003 }
5004
5005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5006 {
5007         if (tp->serdes_counter) {
5008                 /* Give autoneg time to complete. */
5009                 tp->serdes_counter--;
5010                 return;
5011         }
5012
5013         if (!netif_carrier_ok(tp->dev) &&
5014             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5015                 u32 bmcr;
5016
5017                 tg3_readphy(tp, MII_BMCR, &bmcr);
5018                 if (bmcr & BMCR_ANENABLE) {
5019                         u32 phy1, phy2;
5020
5021                         /* Select shadow register 0x1f */
5022                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5023                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5024
5025                         /* Select expansion interrupt status register */
5026                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5027                                          MII_TG3_DSP_EXP1_INT_STAT);
5028                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5029                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5030
5031                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5032                                 /* We have signal detect and not receiving
5033                                  * config code words, link is up by parallel
5034                                  * detection.
5035                                  */
5036
5037                                 bmcr &= ~BMCR_ANENABLE;
5038                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5039                                 tg3_writephy(tp, MII_BMCR, bmcr);
5040                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5041                         }
5042                 }
5043         } else if (netif_carrier_ok(tp->dev) &&
5044                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5045                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5046                 u32 phy2;
5047
5048                 /* Select expansion interrupt status register */
5049                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5050                                  MII_TG3_DSP_EXP1_INT_STAT);
5051                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5052                 if (phy2 & 0x20) {
5053                         u32 bmcr;
5054
5055                         /* Config code words received, turn on autoneg. */
5056                         tg3_readphy(tp, MII_BMCR, &bmcr);
5057                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5058
5059                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5060
5061                 }
5062         }
5063 }
5064
5065 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5066 {
5067         u32 val;
5068         int err;
5069
5070         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5071                 err = tg3_setup_fiber_phy(tp, force_reset);
5072         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5073                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5074         else
5075                 err = tg3_setup_copper_phy(tp, force_reset);
5076
5077         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5078                 u32 scale;
5079
5080                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5081                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5082                         scale = 65;
5083                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5084                         scale = 6;
5085                 else
5086                         scale = 12;
5087
5088                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5089                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5090                 tw32(GRC_MISC_CFG, val);
5091         }
5092
5093         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5094               (6 << TX_LENGTHS_IPG_SHIFT);
5095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5096                 val |= tr32(MAC_TX_LENGTHS) &
5097                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5098                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5099
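             /* Gigabit half-duplex uses an extended slot time (carrier
              * extension); everything else gets the standard slot time.
              */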
5100         if (tp->link_config.active_speed == SPEED_1000 &&
5101             tp->link_config.active_duplex == DUPLEX_HALF)
5102                 tw32(MAC_TX_LENGTHS, val |
5103                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5104         else
5105                 tw32(MAC_TX_LENGTHS, val |
5106                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5107
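             /* Statistics-block coalescing only makes sense while the link
              * is up; a tick value of 0 disables it when the carrier is
              * off.
              */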
5108         if (!tg3_flag(tp, 5705_PLUS)) {
5109                 if (netif_carrier_ok(tp->dev)) {
5110                         tw32(HOSTCC_STAT_COAL_TICKS,
5111                              tp->coal.stats_block_coalesce_usecs);
5112                 } else {
5113                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5114                 }
5115         }
5116
5117         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5118                 val = tr32(PCIE_PWR_MGMT_THRESH);
5119                 if (!netif_carrier_ok(tp->dev))
5120                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5121                               tp->pwrmgmt_thresh;
5122                 else
5123                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5124                 tw32(PCIE_PWR_MGMT_THRESH, val);
5125         }
5126
5127         return err;
5128 }
5129
5130 static inline int tg3_irq_sync(struct tg3 *tp)
5131 {
5132         return tp->irq_sync;
5133 }
5134
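     /* Copy 'len' bytes of registers, starting at register offset 'off',
      * into the dump buffer.  'dst' is first advanced by 'off' bytes so
      * that each register lands at its own offset within the buffer.
      */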
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137         int i;
5138
5139         dst = (u32 *)((u8 *)dst + off);
5140         for (i = 0; i < len; i += sizeof(u32))
5141                 *dst++ = tr32(off + i);
5142 }
5143
5144 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5145 {
5146         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5147         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5148         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5149         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5150         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5151         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5152         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5153         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5154         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5155         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5156         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5157         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5158         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5159         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5160         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5161         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5162         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5163         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5164         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5165
5166         if (tg3_flag(tp, SUPPORT_MSIX))
5167                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5168
5169         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5170         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5171         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5172         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5173         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5174         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5175         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5176         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5177
5178         if (!tg3_flag(tp, 5705_PLUS)) {
5179                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5180                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5181                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5182         }
5183
5184         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5185         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5186         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5187         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5188         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5189
5190         if (tg3_flag(tp, NVRAM))
5191                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5192 }
5193
5194 static void tg3_dump_state(struct tg3 *tp)
5195 {
5196         int i;
5197         u32 *regs;
5198
5199         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5200         if (!regs) {
5201                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5202                 return;
5203         }
5204
5205         if (tg3_flag(tp, PCI_EXPRESS)) {
5206                 /* Read up to but not including private PCI registers */
5207                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5208                         regs[i / sizeof(u32)] = tr32(i);
5209         } else
5210                 tg3_dump_legacy_regs(tp, regs);
5211
5212         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5213                 if (!regs[i + 0] && !regs[i + 1] &&
5214                     !regs[i + 2] && !regs[i + 3])
5215                         continue;
5216
5217                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5218                            i * 4,
5219                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5220         }
5221
5222         kfree(regs);
5223
5224         for (i = 0; i < tp->irq_cnt; i++) {
5225                 struct tg3_napi *tnapi = &tp->napi[i];
5226
5227                 /* SW status block */
5228                 netdev_err(tp->dev,
5229                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5230                            i,
5231                            tnapi->hw_status->status,
5232                            tnapi->hw_status->status_tag,
5233                            tnapi->hw_status->rx_jumbo_consumer,
5234                            tnapi->hw_status->rx_consumer,
5235                            tnapi->hw_status->rx_mini_consumer,
5236                            tnapi->hw_status->idx[0].rx_producer,
5237                            tnapi->hw_status->idx[0].tx_consumer);
5238
5239                 netdev_err(tp->dev,
5240                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5241                            i,
5242                            tnapi->last_tag, tnapi->last_irq_tag,
5243                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5244                            tnapi->rx_rcb_ptr,
5245                            tnapi->prodring.rx_std_prod_idx,
5246                            tnapi->prodring.rx_std_cons_idx,
5247                            tnapi->prodring.rx_jmb_prod_idx,
5248                            tnapi->prodring.rx_jmb_cons_idx);
5249         }
5250 }
5251
5252 /* This is called whenever we suspect that the system chipset is re-
5253  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254  * is bogus tx completions. We try to recover by setting the
5255  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256  * in the workqueue.
5257  */
5258 static void tg3_tx_recover(struct tg3 *tp)
5259 {
5260         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5261                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5262
5263         netdev_warn(tp->dev,
5264                     "The system may be re-ordering memory-mapped I/O "
5265                     "cycles to the network device, attempting to recover. "
5266                     "Please report the problem to the driver maintainer "
5267                     "and include system chipset information.\n");
5268
5269         spin_lock(&tp->lock);
5270         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5271         spin_unlock(&tp->lock);
5272 }
5273
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276         /* Tell compiler to fetch tx indices from memory. */
5277         barrier();
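             /* In-flight descriptors are (prod - cons) modulo the ring
              * size; whatever is left of tx_pending is available.
              */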
5278         return tnapi->tx_pending -
5279                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
5281
5282 /* Tigon3 never reports partial packet sends.  So we do not
5283  * need special logic to handle SKBs that have not had all
5284  * of their frags sent yet, like SunGEM does.
5285  */
5286 static void tg3_tx(struct tg3_napi *tnapi)
5287 {
5288         struct tg3 *tp = tnapi->tp;
5289         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5290         u32 sw_idx = tnapi->tx_cons;
5291         struct netdev_queue *txq;
5292         int index = tnapi - tp->napi;
5293         unsigned int pkts_compl = 0, bytes_compl = 0;
5294
5295         if (tg3_flag(tp, ENABLE_TSS))
5296                 index--;
5297
5298         txq = netdev_get_tx_queue(tp->dev, index);
5299
5300         while (sw_idx != hw_idx) {
5301                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5302                 struct sk_buff *skb = ri->skb;
5303                 int i, tx_bug = 0;
5304
5305                 if (unlikely(skb == NULL)) {
5306                         tg3_tx_recover(tp);
5307                         return;
5308                 }
5309
5310                 pci_unmap_single(tp->pdev,
5311                                  dma_unmap_addr(ri, mapping),
5312                                  skb_headlen(skb),
5313                                  PCI_DMA_TODEVICE);
5314
5315                 ri->skb = NULL;
5316
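                     /* Step past any extra descriptors that were consumed
                      * when the preceding mapping had to be split across
                      * multiple BDs.
                      */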
5317                 while (ri->fragmented) {
5318                         ri->fragmented = false;
5319                         sw_idx = NEXT_TX(sw_idx);
5320                         ri = &tnapi->tx_buffers[sw_idx];
5321                 }
5322
5323                 sw_idx = NEXT_TX(sw_idx);
5324
5325                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5326                         ri = &tnapi->tx_buffers[sw_idx];
5327                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5328                                 tx_bug = 1;
5329
5330                         pci_unmap_page(tp->pdev,
5331                                        dma_unmap_addr(ri, mapping),
5332                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5333                                        PCI_DMA_TODEVICE);
5334
5335                         while (ri->fragmented) {
5336                                 ri->fragmented = false;
5337                                 sw_idx = NEXT_TX(sw_idx);
5338                                 ri = &tnapi->tx_buffers[sw_idx];
5339                         }
5340
5341                         sw_idx = NEXT_TX(sw_idx);
5342                 }
5343
5344                 pkts_compl++;
5345                 bytes_compl += skb->len;
5346
5347                 dev_kfree_skb(skb);
5348
5349                 if (unlikely(tx_bug)) {
5350                         tg3_tx_recover(tp);
5351                         return;
5352                 }
5353         }
5354
5355         netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5356
5357         tnapi->tx_cons = sw_idx;
5358
5359         /* Need to make the tx_cons update visible to tg3_start_xmit()
5360          * before checking for netif_queue_stopped().  Without the
5361          * memory barrier, there is a small possibility that tg3_start_xmit()
5362          * will miss it and cause the queue to be stopped forever.
5363          */
5364         smp_mb();
5365
5366         if (unlikely(netif_tx_queue_stopped(txq) &&
5367                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5368                 __netif_tx_lock(txq, smp_processor_id());
5369                 if (netif_tx_queue_stopped(txq) &&
5370                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5371                         netif_tx_wake_queue(txq);
5372                 __netif_tx_unlock(txq);
5373         }
5374 }
5375
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378         if (!ri->data)
5379                 return;
5380
5381         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382                          map_sz, PCI_DMA_FROMDEVICE);
5383         kfree(ri->data);
5384         ri->data = NULL;
5385 }
5386
5387 /* Returns the size of the data buffer allocated, or < 0 on error.
5388  *
5389  * We only need to fill in the address because the other members
5390  * of the RX descriptor are invariant, see tg3_init_rings.
5391  *
5392  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5393  * posting buffers we only dirty the first cache line of the RX
5394  * descriptor (containing the address).  Whereas for the RX status
5395  * buffers the cpu only reads the last cacheline of the RX descriptor
5396  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397  */
5398 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5399                             u32 opaque_key, u32 dest_idx_unmasked)
5400 {
5401         struct tg3_rx_buffer_desc *desc;
5402         struct ring_info *map;
5403         u8 *data;
5404         dma_addr_t mapping;
5405         int skb_size, data_size, dest_idx;
5406
5407         switch (opaque_key) {
5408         case RXD_OPAQUE_RING_STD:
5409                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5410                 desc = &tpr->rx_std[dest_idx];
5411                 map = &tpr->rx_std_buffers[dest_idx];
5412                 data_size = tp->rx_pkt_map_sz;
5413                 break;
5414
5415         case RXD_OPAQUE_RING_JUMBO:
5416                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5417                 desc = &tpr->rx_jmb[dest_idx].std;
5418                 map = &tpr->rx_jmb_buffers[dest_idx];
5419                 data_size = TG3_RX_JMB_MAP_SZ;
5420                 break;
5421
5422         default:
5423                 return -EINVAL;
5424         }
5425
5426         /* Do not overwrite any of the map or rp information
5427          * until we are sure we can commit to a new buffer.
5428          *
5429          * Callers depend upon this behavior and assume that
5430          * we leave everything unchanged if we fail.
5431          */
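             /* Size the allocation so build_skb() can wrap it later: the
              * aligned data area (including the RX offset headroom) plus
              * room for the struct skb_shared_info that build_skb()
              * expects at the end of the buffer.
              */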
5432         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5433                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434         data = kmalloc(skb_size, GFP_ATOMIC);
5435         if (!data)
5436                 return -ENOMEM;
5437
5438         mapping = pci_map_single(tp->pdev,
5439                                  data + TG3_RX_OFFSET(tp),
5440                                  data_size,
5441                                  PCI_DMA_FROMDEVICE);
5442         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5443                 kfree(data);
5444                 return -EIO;
5445         }
5446
5447         map->data = data;
5448         dma_unmap_addr_set(map, mapping, mapping);
5449
5450         desc->addr_hi = ((u64)mapping >> 32);
5451         desc->addr_lo = ((u64)mapping & 0xffffffff);
5452
5453         return data_size;
5454 }
5455
5456 /* We only need to move over in the address because the other
5457  * members of the RX descriptor are invariant.  See notes above
5458  * tg3_alloc_rx_data for full details.
5459  */
5460 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5461                            struct tg3_rx_prodring_set *dpr,
5462                            u32 opaque_key, int src_idx,
5463                            u32 dest_idx_unmasked)
5464 {
5465         struct tg3 *tp = tnapi->tp;
5466         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5467         struct ring_info *src_map, *dest_map;
5468         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5469         int dest_idx;
5470
5471         switch (opaque_key) {
5472         case RXD_OPAQUE_RING_STD:
5473                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5474                 dest_desc = &dpr->rx_std[dest_idx];
5475                 dest_map = &dpr->rx_std_buffers[dest_idx];
5476                 src_desc = &spr->rx_std[src_idx];
5477                 src_map = &spr->rx_std_buffers[src_idx];
5478                 break;
5479
5480         case RXD_OPAQUE_RING_JUMBO:
5481                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5482                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5483                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5484                 src_desc = &spr->rx_jmb[src_idx].std;
5485                 src_map = &spr->rx_jmb_buffers[src_idx];
5486                 break;
5487
5488         default:
5489                 return;
5490         }
5491
5492         dest_map->data = src_map->data;
5493         dma_unmap_addr_set(dest_map, mapping,
5494                            dma_unmap_addr(src_map, mapping));
5495         dest_desc->addr_hi = src_desc->addr_hi;
5496         dest_desc->addr_lo = src_desc->addr_lo;
5497
5498         /* Ensure that the update to the data pointer happens after the
5499          * physical addresses have been transferred to the new BD location.
5500          */
5501         smp_wmb();
5502
5503         src_map->data = NULL;
5504 }
5505
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507  * buffers to the chip, and one special ring the chip uses to report
5508  * status back to the host.
5509  *
5510  * The special ring reports the status of received packets to the
5511  * host.  The chip does not write into the original descriptor the
5512  * RX buffer was obtained from.  The chip simply takes the original
5513  * descriptor as provided by the host, updates the status and length
5514  * field, then writes this into the next status ring entry.
5515  *
5516  * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5518  * it is first placed into the on-chip ram.  When the packet's length
5519  * is known, it walks down the TG3_BDINFO entries to select the ring.
5520  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521  * which is within the range of the new packet's length is chosen.
5522  *
5523  * The "separate ring for rx status" scheme may sound queer, but it makes
5524  * sense from a cache coherency perspective.  If only the host writes
5525  * to the buffer post rings, and only the chip writes to the rx status
5526  * rings, then cache lines never move beyond shared-modified state.
5527  * If both the host and chip were to write into the same ring, cache line
5528  * eviction could occur since both entities want it in an exclusive state.
5529  */
5530 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5531 {
5532         struct tg3 *tp = tnapi->tp;
5533         u32 work_mask, rx_std_posted = 0;
5534         u32 std_prod_idx, jmb_prod_idx;
5535         u32 sw_idx = tnapi->rx_rcb_ptr;
5536         u16 hw_idx;
5537         int received;
5538         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5539
5540         hw_idx = *(tnapi->rx_rcb_prod_idx);
5541         /*
5542          * We need to order the read of hw_idx and the read of
5543          * the opaque cookie.
5544          */
5545         rmb();
5546         work_mask = 0;
5547         received = 0;
5548         std_prod_idx = tpr->rx_std_prod_idx;
5549         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5550         while (sw_idx != hw_idx && budget > 0) {
5551                 struct ring_info *ri;
5552                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5553                 unsigned int len;
5554                 struct sk_buff *skb;
5555                 dma_addr_t dma_addr;
5556                 u32 opaque_key, desc_idx, *post_ptr;
5557                 u8 *data;
5558
5559                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5560                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5561                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5562                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5563                         dma_addr = dma_unmap_addr(ri, mapping);
5564                         data = ri->data;
5565                         post_ptr = &std_prod_idx;
5566                         rx_std_posted++;
5567                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5568                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5569                         dma_addr = dma_unmap_addr(ri, mapping);
5570                         data = ri->data;
5571                         post_ptr = &jmb_prod_idx;
5572                 } else
5573                         goto next_pkt_nopost;
5574
5575                 work_mask |= opaque_key;
5576
5577                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5578                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5579                 drop_it:
5580                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5581                                        desc_idx, *post_ptr);
5582                 drop_it_no_recycle:
5583                         /* Other drop statistics are kept by the card. */
5584                         tp->rx_dropped++;
5585                         goto next_pkt;
5586                 }
5587
5588                 prefetch(data + TG3_RX_OFFSET(tp));
5589                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5590                       ETH_FCS_LEN;
5591
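                     /* Large packets: hand the existing buffer to the stack
                      * via build_skb() and post a freshly allocated
                      * replacement.  Small packets: copy into a new skb and
                      * recycle the original buffer back onto the ring.
                      */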
5592                 if (len > TG3_RX_COPY_THRESH(tp)) {
5593                         int skb_size;
5594
5595                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5596                                                     *post_ptr);
5597                         if (skb_size < 0)
5598                                 goto drop_it;
5599
5600                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5601                                          PCI_DMA_FROMDEVICE);
5602
5603                         skb = build_skb(data);
5604                         if (!skb) {
5605                                 kfree(data);
5606                                 goto drop_it_no_recycle;
5607                         }
5608                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5609                         /* Ensure that the update to the data happens
5610                          * after the usage of the old DMA mapping.
5611                          */
5612                         smp_wmb();
5613
5614                         ri->data = NULL;
5615
5616                 } else {
5617                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5618                                        desc_idx, *post_ptr);
5619
5620                         skb = netdev_alloc_skb(tp->dev,
5621                                                len + TG3_RAW_IP_ALIGN);
5622                         if (skb == NULL)
5623                                 goto drop_it_no_recycle;
5624
5625                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5626                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5627                         memcpy(skb->data,
5628                                data + TG3_RX_OFFSET(tp),
5629                                len);
5630                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5631                 }
5632
5633                 skb_put(skb, len);
5634                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5635                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5636                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5637                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5638                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5639                 else
5640                         skb_checksum_none_assert(skb);
5641
5642                 skb->protocol = eth_type_trans(skb, tp->dev);
5643
5644                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5645                     skb->protocol != htons(ETH_P_8021Q)) {
5646                         dev_kfree_skb(skb);
5647                         goto drop_it_no_recycle;
5648                 }
5649
5650                 if (desc->type_flags & RXD_FLAG_VLAN &&
5651                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5652                         __vlan_hwaccel_put_tag(skb,
5653                                                desc->err_vlan & RXD_VLAN_MASK);
5654
5655                 napi_gro_receive(&tnapi->napi, skb);
5656
5657                 received++;
5658                 budget--;
5659
5660 next_pkt:
5661                 (*post_ptr)++;
5662
5663                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5664                         tpr->rx_std_prod_idx = std_prod_idx &
5665                                                tp->rx_std_ring_mask;
5666                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5667                                      tpr->rx_std_prod_idx);
5668                         work_mask &= ~RXD_OPAQUE_RING_STD;
5669                         rx_std_posted = 0;
5670                 }
5671 next_pkt_nopost:
5672                 sw_idx++;
5673                 sw_idx &= tp->rx_ret_ring_mask;
5674
5675                 /* Refresh hw_idx to see if there is new work */
5676                 if (sw_idx == hw_idx) {
5677                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5678                         rmb();
5679                 }
5680         }
5681
5682         /* ACK the status ring. */
5683         tnapi->rx_rcb_ptr = sw_idx;
5684         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5685
5686         /* Refill RX ring(s). */
5687         if (!tg3_flag(tp, ENABLE_RSS)) {
5688                 if (work_mask & RXD_OPAQUE_RING_STD) {
5689                         tpr->rx_std_prod_idx = std_prod_idx &
5690                                                tp->rx_std_ring_mask;
5691                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5692                                      tpr->rx_std_prod_idx);
5693                 }
5694                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5695                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5696                                                tp->rx_jmb_ring_mask;
5697                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5698                                      tpr->rx_jmb_prod_idx);
5699                 }
5700                 mmiowb();
5701         } else if (work_mask) {
5702                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5703                  * updated before the producer indices can be updated.
5704                  */
5705                 smp_wmb();
5706
5707                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5708                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5709
5710                 if (tnapi != &tp->napi[1])
5711                         napi_schedule(&tp->napi[1].napi);
5712         }
5713
5714         return received;
5715 }
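/* Editorial sketch of the consumer-loop skeleton tg3_rx() follows, with
 * the driver-specific details stripped away; all names below are
 * illustrative, not part of the tg3 API:
 *
 *	sw = cached_consumer;
 *	hw = *status_block_producer;
 *	rmb();				// order index read vs. descriptor reads
 *	while (sw != hw && budget > 0) {
 *		process(ring[sw]);
 *		sw = (sw + 1) & ring_mask;
 *		if (sw == hw) {		// caught up: re-check for fresh work
 *			hw = *status_block_producer;
 *			rmb();
 *		}
 *	}
 *	cached_consumer = sw;
 *	writel(sw, consumer_mailbox);	// tell the chip what we consumed
 */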
5716
5717 static void tg3_poll_link(struct tg3 *tp)
5718 {
5719         /* handle link change and other phy events */
5720         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5721                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5722
5723                 if (sblk->status & SD_STATUS_LINK_CHG) {
5724                         sblk->status = SD_STATUS_UPDATED |
5725                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5726                         spin_lock(&tp->lock);
5727                         if (tg3_flag(tp, USE_PHYLIB)) {
5728                                 tw32_f(MAC_STATUS,
5729                                      (MAC_STATUS_SYNC_CHANGED |
5730                                       MAC_STATUS_CFG_CHANGED |
5731                                       MAC_STATUS_MI_COMPLETION |
5732                                       MAC_STATUS_LNKSTATE_CHANGED));
5733                                 udelay(40);
5734                         } else
5735                                 tg3_setup_phy(tp, 0);
5736                         spin_unlock(&tp->lock);
5737                 }
5738         }
5739 }
5740
5741 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5742                                 struct tg3_rx_prodring_set *dpr,
5743                                 struct tg3_rx_prodring_set *spr)
5744 {
5745         u32 si, di, cpycnt, src_prod_idx;
5746         int i, err = 0;
5747
5748         while (1) {
5749                 src_prod_idx = spr->rx_std_prod_idx;
5750
5751                 /* Make sure updates to the rx_std_buffers[] entries and the
5752                  * standard producer index are seen in the correct order.
5753                  */
5754                 smp_rmb();
5755
5756                 if (spr->rx_std_cons_idx == src_prod_idx)
5757                         break;
5758
5759                 if (spr->rx_std_cons_idx < src_prod_idx)
5760                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5761                 else
5762                         cpycnt = tp->rx_std_ring_mask + 1 -
5763                                  spr->rx_std_cons_idx;
5764
5765                 cpycnt = min(cpycnt,
5766                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5767
5768                 si = spr->rx_std_cons_idx;
5769                 di = dpr->rx_std_prod_idx;
5770
5771                 for (i = di; i < di + cpycnt; i++) {
5772                         if (dpr->rx_std_buffers[i].data) {
5773                                 cpycnt = i - di;
5774                                 err = -ENOSPC;
5775                                 break;
5776                         }
5777                 }
5778
5779                 if (!cpycnt)
5780                         break;
5781
5782                 /* Ensure that updates to the rx_std_buffers ring and the
5783                  * shadowed hardware producer ring from tg3_recycle_skb() are
5784                  * ordered correctly WRT the skb check above.
5785                  */
5786                 smp_rmb();
5787
5788                 memcpy(&dpr->rx_std_buffers[di],
5789                        &spr->rx_std_buffers[si],
5790                        cpycnt * sizeof(struct ring_info));
5791
5792                 for (i = 0; i < cpycnt; i++, di++, si++) {
5793                         struct tg3_rx_buffer_desc *sbd, *dbd;
5794                         sbd = &spr->rx_std[si];
5795                         dbd = &dpr->rx_std[di];
5796                         dbd->addr_hi = sbd->addr_hi;
5797                         dbd->addr_lo = sbd->addr_lo;
5798                 }
5799
5800                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5801                                        tp->rx_std_ring_mask;
5802                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5803                                        tp->rx_std_ring_mask;
5804         }
5805
5806         while (1) {
5807                 src_prod_idx = spr->rx_jmb_prod_idx;
5808
5809                 /* Make sure updates to the rx_jmb_buffers[] entries and
5810                  * the jumbo producer index are seen in the correct order.
5811                  */
5812                 smp_rmb();
5813
5814                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5815                         break;
5816
5817                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5818                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5819                 else
5820                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5821                                  spr->rx_jmb_cons_idx;
5822
5823                 cpycnt = min(cpycnt,
5824                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5825
5826                 si = spr->rx_jmb_cons_idx;
5827                 di = dpr->rx_jmb_prod_idx;
5828
5829                 for (i = di; i < di + cpycnt; i++) {
5830                         if (dpr->rx_jmb_buffers[i].data) {
5831                                 cpycnt = i - di;
5832                                 err = -ENOSPC;
5833                                 break;
5834                         }
5835                 }
5836
5837                 if (!cpycnt)
5838                         break;
5839
5840                 /* Ensure that updates to the rx_jmb_buffers ring and the
5841                  * shadowed hardware producer ring from tg3_recycle_skb() are
5842                  * ordered correctly WRT the skb check above.
5843                  */
5844                 smp_rmb();
5845
5846                 memcpy(&dpr->rx_jmb_buffers[di],
5847                        &spr->rx_jmb_buffers[si],
5848                        cpycnt * sizeof(struct ring_info));
5849
5850                 for (i = 0; i < cpycnt; i++, di++, si++) {
5851                         struct tg3_rx_buffer_desc *sbd, *dbd;
5852                         sbd = &spr->rx_jmb[si].std;
5853                         dbd = &dpr->rx_jmb[di].std;
5854                         dbd->addr_hi = sbd->addr_hi;
5855                         dbd->addr_lo = sbd->addr_lo;
5856                 }
5857
5858                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5859                                        tp->rx_jmb_ring_mask;
5860                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5861                                        tp->rx_jmb_ring_mask;
5862         }
5863
5864         return err;
5865 }
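/* Editorial worked example for the wrap-aware copy count above, assuming
 * a 512-entry ring (mask 511): with rx_std_cons_idx = 500 and
 * rx_std_prod_idx = 10 the producer has wrapped, so the first pass
 * copies 512 - 500 = 12 entries up to the end of the ring; the outer
 * loop then comes around and copies the remaining 10 from index 0.
 * The min() against the destination producer index likewise keeps a
 * single memcpy() from running past the end of dpr's ring.
 */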
5866
5867 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5868 {
5869         struct tg3 *tp = tnapi->tp;
5870
5871         /* run TX completion thread */
5872         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5873                 tg3_tx(tnapi);
5874                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5875                         return work_done;
5876         }
5877
5878         /* run RX thread, within the bounds set by NAPI.
5879          * All RX "locking" is done by ensuring outside
5880          * code synchronizes with tg3->napi.poll()
5881          */
5882         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5883                 work_done += tg3_rx(tnapi, budget - work_done);
5884
5885         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5886                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5887                 int i, err = 0;
5888                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5889                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5890
5891                 for (i = 1; i < tp->irq_cnt; i++)
5892                         err |= tg3_rx_prodring_xfer(tp, dpr,
5893                                                     &tp->napi[i].prodring);
5894
5895                 wmb();
5896
5897                 if (std_prod_idx != dpr->rx_std_prod_idx)
5898                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5899                                      dpr->rx_std_prod_idx);
5900
5901                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5902                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5903                                      dpr->rx_jmb_prod_idx);
5904
5905                 mmiowb();
5906
5907                 if (err)
5908                         tw32_f(HOSTCC_MODE, tp->coal_now);
5909         }
5910
5911         return work_done;
5912 }
5913
5914 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5915 {
5916         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5917                 schedule_work(&tp->reset_task);
5918 }
5919
5920 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5921 {
5922         cancel_work_sync(&tp->reset_task);
5923         tg3_flag_clear(tp, RESET_TASK_PENDING);
5924 }
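/* Editorial sketch of the schedule-once idiom used by the two helpers
 * above: test_and_set_bit() claims and checks the pending flag in one
 * atomic step, so racing callers cannot double-schedule the work.
 * Illustrative names only:
 *
 *	if (!test_and_set_bit(MY_PENDING_BIT, &flags))
 *		schedule_work(&my_work);	// only the first caller queues it
 *
 *	// teardown: wait out a running instance, then drop the claim
 *	cancel_work_sync(&my_work);
 *	clear_bit(MY_PENDING_BIT, &flags);
 */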
5925
5926 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5927 {
5928         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5929         struct tg3 *tp = tnapi->tp;
5930         int work_done = 0;
5931         struct tg3_hw_status *sblk = tnapi->hw_status;
5932
5933         while (1) {
5934                 work_done = tg3_poll_work(tnapi, work_done, budget);
5935
5936                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5937                         goto tx_recovery;
5938
5939                 if (unlikely(work_done >= budget))
5940                         break;
5941
5942                 /* tp->last_tag is used in tg3_int_reenable() below
5943                  * to tell the hw how much work has been processed,
5944                  * so we must read it before checking for more work.
5945                  */
5946                 tnapi->last_tag = sblk->status_tag;
5947                 tnapi->last_irq_tag = tnapi->last_tag;
5948                 rmb();
5949
5950                 /* check for RX/TX work to do */
5951                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5952                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5953                         napi_complete(napi);
5954                         /* Reenable interrupts. */
5955                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5956                         mmiowb();
5957                         break;
5958                 }
5959         }
5960
5961         return work_done;
5962
5963 tx_recovery:
5964         /* work_done is guaranteed to be less than budget. */
5965         napi_complete(napi);
5966         tg3_reset_task_schedule(tp);
5967         return work_done;
5968 }
5969
5970 static void tg3_process_error(struct tg3 *tp)
5971 {
5972         u32 val;
5973         bool real_error = false;
5974
5975         if (tg3_flag(tp, ERROR_PROCESSED))
5976                 return;
5977
5978         /* Check Flow Attention register */
5979         val = tr32(HOSTCC_FLOW_ATTN);
5980         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5981                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5982                 real_error = true;
5983         }
5984
5985         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5986                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5987                 real_error = true;
5988         }
5989
5990         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5991                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5992                 real_error = true;
5993         }
5994
5995         if (!real_error)
5996                 return;
5997
5998         tg3_dump_state(tp);
5999
6000         tg3_flag_set(tp, ERROR_PROCESSED);
6001         tg3_reset_task_schedule(tp);
6002 }
6003
6004 static int tg3_poll(struct napi_struct *napi, int budget)
6005 {
6006         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6007         struct tg3 *tp = tnapi->tp;
6008         int work_done = 0;
6009         struct tg3_hw_status *sblk = tnapi->hw_status;
6010
6011         while (1) {
6012                 if (sblk->status & SD_STATUS_ERROR)
6013                         tg3_process_error(tp);
6014
6015                 tg3_poll_link(tp);
6016
6017                 work_done = tg3_poll_work(tnapi, work_done, budget);
6018
6019                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6020                         goto tx_recovery;
6021
6022                 if (unlikely(work_done >= budget))
6023                         break;
6024
6025                 if (tg3_flag(tp, TAGGED_STATUS)) {
6026                         /* tp->last_tag is used in tg3_int_reenable() below
6027                          * to tell the hw how much work has been processed,
6028                          * so we must read it before checking for more work.
6029                          */
6030                         tnapi->last_tag = sblk->status_tag;
6031                         tnapi->last_irq_tag = tnapi->last_tag;
6032                         rmb();
6033                 } else
6034                         sblk->status &= ~SD_STATUS_UPDATED;
6035
6036                 if (likely(!tg3_has_work(tnapi))) {
6037                         napi_complete(napi);
6038                         tg3_int_reenable(tnapi);
6039                         break;
6040                 }
6041         }
6042
6043         return work_done;
6044
6045 tx_recovery:
6046         /* work_done is guaranteed to be less than budget. */
6047         napi_complete(napi);
6048         tg3_reset_task_schedule(tp);
6049         return work_done;
6050 }
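/* Editorial sketch of the NAPI contract that both tg3_poll() and
 * tg3_poll_msix() implement (generic skeleton, illustrative names):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = clean_rings(napi, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);	// no work left: leave polled mode
 *			reenable_device_irqs();	// and re-arm the interrupt
 *		}
 *		return done;			// never report more than budget
 *	}
 */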
6051
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054         int i;
6055
6056         for (i = tp->irq_cnt - 1; i >= 0; i--)
6057                 napi_disable(&tp->napi[i].napi);
6058 }
6059
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062         int i;
6063
6064         for (i = 0; i < tp->irq_cnt; i++)
6065                 napi_enable(&tp->napi[i].napi);
6066 }
6067
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070         int i;
6071
6072         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073         for (i = 1; i < tp->irq_cnt; i++)
6074                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079         int i;
6080
6081         for (i = 0; i < tp->irq_cnt; i++)
6082                 netif_napi_del(&tp->napi[i].napi);
6083 }
6084
6085 static inline void tg3_netif_stop(struct tg3 *tp)
6086 {
6087         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6088         tg3_napi_disable(tp);
6089         netif_tx_disable(tp->dev);
6090 }
6091
6092 static inline void tg3_netif_start(struct tg3 *tp)
6093 {
6094         /* NOTE: unconditional netif_tx_wake_all_queues is only
6095          * appropriate so long as all callers are assured to
6096          * have free tx slots (such as after tg3_init_hw)
6097          */
6098         netif_tx_wake_all_queues(tp->dev);
6099
6100         tg3_napi_enable(tp);
6101         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6102         tg3_enable_ints(tp);
6103 }
6104
6105 static void tg3_irq_quiesce(struct tg3 *tp)
6106 {
6107         int i;
6108
6109         BUG_ON(tp->irq_sync);
6110
6111         tp->irq_sync = 1;
6112         smp_mb();
6113
6114         for (i = 0; i < tp->irq_cnt; i++)
6115                 synchronize_irq(tp->napi[i].irq_vec);
6116 }
6117
6118 /* Fully shut down all tg3 driver activity elsewhere in the system.
6119  * If irq_sync is non-zero, the IRQ handlers must be synchronized
6120  * as well.  This is usually only necessary when shutting down the
6121  * device.
6122  */
6123 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6124 {
6125         spin_lock_bh(&tp->lock);
6126         if (irq_sync)
6127                 tg3_irq_quiesce(tp);
6128 }
6129
6130 static inline void tg3_full_unlock(struct tg3 *tp)
6131 {
6132         spin_unlock_bh(&tp->lock);
6133 }
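/* Editorial sketch of how the quiesce handshake pairs with the IRQ
 * handlers below: tg3_irq_quiesce() publishes irq_sync with smp_mb()
 * and synchronize_irq() waits out handlers already running; handlers
 * that start afterwards see tg3_irq_sync() nonzero and decline to
 * schedule NAPI.  Roughly (illustrative only):
 *
 *	quiescing CPU			interrupt handler
 *	-------------			-----------------
 *	tp->irq_sync = 1;
 *	smp_mb();
 *	synchronize_irq(vec);		if (tg3_irq_sync(tp))
 *	... device is quiet ...			goto out;  // no napi_schedule()
 */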
6134
6135 /* One-shot MSI handler - the chip automatically disables the
6136  * interrupt after sending the MSI, so the driver doesn't have to.
6137  */
6138 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6139 {
6140         struct tg3_napi *tnapi = dev_id;
6141         struct tg3 *tp = tnapi->tp;
6142
6143         prefetch(tnapi->hw_status);
6144         if (tnapi->rx_rcb)
6145                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6146
6147         if (likely(!tg3_irq_sync(tp)))
6148                 napi_schedule(&tnapi->napi);
6149
6150         return IRQ_HANDLED;
6151 }
6152
6153 /* MSI ISR - No need to check for interrupt sharing and no need to
6154  * flush status block and interrupt mailbox. PCI ordering rules
6155  * guarantee that MSI will arrive after the status block.
6156  */
6157 static irqreturn_t tg3_msi(int irq, void *dev_id)
6158 {
6159         struct tg3_napi *tnapi = dev_id;
6160         struct tg3 *tp = tnapi->tp;
6161
6162         prefetch(tnapi->hw_status);
6163         if (tnapi->rx_rcb)
6164                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165         /*
6166          * Writing any value to intr-mbox-0 clears PCI INTA# and
6167          * chip-internal interrupt pending events.
6168          * Writing non-zero to intr-mbox-0 additionally tells the
6169          * NIC to stop sending us irqs, engaging "in-intr-handler"
6170          * event coalescing.
6171          */
6172         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6173         if (likely(!tg3_irq_sync(tp)))
6174                 napi_schedule(&tnapi->napi);
6175
6176         return IRQ_RETVAL(1);
6177 }
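/* Editorial note on the mailbox protocol referenced above: writing any
 * value to interrupt mailbox 0 acks the INTA#/MSI, and a non-zero value
 * additionally masks further interrupts until the mailbox is written
 * again.  Roughly (tagged-status mode re-arms with the last processed
 * status tag in the upper bits instead of plain zero):
 *
 *	tw32_mailbox(tnapi->int_mbox, 0x00000001);	// ack + mask
 *	...  NAPI poll drains the rings  ...
 *	tw32_mailbox(tnapi->int_mbox, 0x00000000);	// unmask / re-arm
 */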
6178
6179 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6180 {
6181         struct tg3_napi *tnapi = dev_id;
6182         struct tg3 *tp = tnapi->tp;
6183         struct tg3_hw_status *sblk = tnapi->hw_status;
6184         unsigned int handled = 1;
6185
6186         /* In INTx mode, it is possible for the interrupt to arrive at
6187          * the CPU before the status block posted for it lands in memory.
6188          * Reading the PCI State register will confirm whether the
6189          * interrupt is ours and will flush the status block.
6190          */
6191         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6192                 if (tg3_flag(tp, CHIP_RESETTING) ||
6193                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6194                         handled = 0;
6195                         goto out;
6196                 }
6197         }
6198
6199         /*
6200          * Writing any value to intr-mbox-0 clears PCI INTA# and
6201          * chip-internal interrupt pending events.
6202          * Writing non-zero to intr-mbox-0 additionally tells the
6203          * NIC to stop sending us irqs, engaging "in-intr-handler"
6204          * event coalescing.
6205          *
6206          * Flush the mailbox to de-assert the IRQ immediately to prevent
6207          * spurious interrupts.  The flush impacts performance but
6208          * excessive spurious interrupts can be worse in some cases.
6209          */
6210         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6211         if (tg3_irq_sync(tp))
6212                 goto out;
6213         sblk->status &= ~SD_STATUS_UPDATED;
6214         if (likely(tg3_has_work(tnapi))) {
6215                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6216                 napi_schedule(&tnapi->napi);
6217         } else {
6218                 /* No work, shared interrupt perhaps?  re-enable
6219                  * interrupts, and flush that PCI write
6220                  */
6221                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6222                                0x00000000);
6223         }
6224 out:
6225         return IRQ_RETVAL(handled);
6226 }
6227
6228 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6229 {
6230         struct tg3_napi *tnapi = dev_id;
6231         struct tg3 *tp = tnapi->tp;
6232         struct tg3_hw_status *sblk = tnapi->hw_status;
6233         unsigned int handled = 1;
6234
6235         /* In INTx mode, it is possible for the interrupt to arrive at
6236          * the CPU before the status block posted for it lands in memory.
6237          * Reading the PCI State register will confirm whether the
6238          * interrupt is ours and will flush the status block.
6239          */
6240         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6241                 if (tg3_flag(tp, CHIP_RESETTING) ||
6242                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6243                         handled = 0;
6244                         goto out;
6245                 }
6246         }
6247
6248         /*
6249          * Writing any value to intr-mbox-0 clears PCI INTA# and
6250          * chip-internal interrupt pending events.
6251          * Writing non-zero to intr-mbox-0 additionally tells the
6252          * NIC to stop sending us irqs, engaging "in-intr-handler"
6253          * event coalescing.
6254          *
6255          * Flush the mailbox to de-assert the IRQ immediately to prevent
6256          * spurious interrupts.  The flush impacts performance but
6257          * excessive spurious interrupts can be worse in some cases.
6258          */
6259         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6260
6261         /*
6262          * In a shared interrupt configuration, sometimes other devices'
6263          * interrupts will scream.  We record the current status tag here
6264          * so that the above check can report that the screaming interrupts
6265          * are unhandled.  Eventually they will be silenced.
6266          */
6267         tnapi->last_irq_tag = sblk->status_tag;
6268
6269         if (tg3_irq_sync(tp))
6270                 goto out;
6271
6272         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6273
6274         napi_schedule(&tnapi->napi);
6275
6276 out:
6277         return IRQ_RETVAL(handled);
6278 }
6279
6280 /* ISR for interrupt test */
6281 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6282 {
6283         struct tg3_napi *tnapi = dev_id;
6284         struct tg3 *tp = tnapi->tp;
6285         struct tg3_hw_status *sblk = tnapi->hw_status;
6286
6287         if ((sblk->status & SD_STATUS_UPDATED) ||
6288             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6289                 tg3_disable_ints(tp);
6290                 return IRQ_RETVAL(1);
6291         }
6292         return IRQ_RETVAL(0);
6293 }
6294
6295 #ifdef CONFIG_NET_POLL_CONTROLLER
6296 static void tg3_poll_controller(struct net_device *dev)
6297 {
6298         int i;
6299         struct tg3 *tp = netdev_priv(dev);
6300
6301         for (i = 0; i < tp->irq_cnt; i++)
6302                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6303 }
6304 #endif
6305
6306 static int tg3_init_hw(struct tg3 *, int);
6307 static int tg3_halt(struct tg3 *, int, int);
6308
6309 static void tg3_reset_task(struct work_struct *work)
6310 {
6311         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6312         int err;
6313
6314         tg3_full_lock(tp, 0);
6315
6316         if (!netif_running(tp->dev)) {
6317                 tg3_flag_clear(tp, RESET_TASK_PENDING);
6318                 tg3_full_unlock(tp);
6319                 return;
6320         }
6321
6322         tg3_full_unlock(tp);
6323
6324         tg3_phy_stop(tp);
6325
6326         tg3_netif_stop(tp);
6327
6328         tg3_full_lock(tp, 1);
6329
6330         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6331                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6332                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6333                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6334                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6335         }
6336
6337         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6338         err = tg3_init_hw(tp, 1);
6339         if (err)
6340                 goto out;
6341
6342         tg3_netif_start(tp);
6343
6344 out:
6345         tg3_full_unlock(tp);
6346
6347         if (!err)
6348                 tg3_phy_start(tp);
6349
6350         tg3_flag_clear(tp, RESET_TASK_PENDING);
6351 }
6352
6353 static void tg3_tx_timeout(struct net_device *dev)
6354 {
6355         struct tg3 *tp = netdev_priv(dev);
6356
6357         if (netif_msg_tx_err(tp)) {
6358                 netdev_err(dev, "transmit timed out, resetting\n");
6359                 tg3_dump_state(tp);
6360         }
6361
6362         tg3_reset_task_schedule(tp);
6363 }
6364
6365 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
6366 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6367 {
6368         u32 base = (u32) mapping & 0xffffffff;
6369
6370         return (base > 0xffffdcc0) && (base + len + 8 < base);
6371 }
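/* Editorial worked example for the test above: the filter constant
 * 0xffffdcc0 leaves 0x2340 (9024) bytes of headroom before a 4GB
 * boundary, which presumably bounds the largest buffer the driver ever
 * maps (a jumbo frame plus overhead).  The second clause then detects
 * the actual wrap in 32-bit arithmetic.  E.g. base = 0xffffff00,
 * len = 0x200:
 *
 *	base + len + 8 = 0x100000108, truncated to u32 = 0x108 < base
 *
 * so the buffer straddles a 4GB boundary and needs the workaround.
 */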
6372
6373 /* Test for DMA addresses > 40-bit */
6374 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6375                                           int len)
6376 {
6377 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6378         if (tg3_flag(tp, 40BIT_DMA_BUG))
6379                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6380         return 0;
6381 #else
6382         return 0;
6383 #endif
6384 }
6385
6386 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6387                                  dma_addr_t mapping, u32 len, u32 flags,
6388                                  u32 mss, u32 vlan)
6389 {
6390         txbd->addr_hi = ((u64) mapping >> 32);
6391         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6392         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6393         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6394 }
6395
6396 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6397                             dma_addr_t map, u32 len, u32 flags,
6398                             u32 mss, u32 vlan)
6399 {
6400         struct tg3 *tp = tnapi->tp;
6401         bool hwbug = false;
6402
6403         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6404                 hwbug = true;
6405
6406         if (tg3_4g_overflow_test(map, len))
6407                 hwbug = true;
6408
6409         if (tg3_40bit_overflow_test(tp, map, len))
6410                 hwbug = true;
6411
6412         if (tp->dma_limit) {
6413                 u32 prvidx = *entry;
6414                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6415                 while (len > tp->dma_limit && *budget) {
6416                         u32 frag_len = tp->dma_limit;
6417                         len -= tp->dma_limit;
6418
6419                         /* Avoid the 8-byte DMA problem */
6420                         if (len <= 8) {
6421                                 len += tp->dma_limit / 2;
6422                                 frag_len = tp->dma_limit / 2;
6423                         }
6424
6425                         tnapi->tx_buffers[*entry].fragmented = true;
6426
6427                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6428                                       frag_len, tmp_flag, mss, vlan);
6429                         *budget -= 1;
6430                         prvidx = *entry;
6431                         *entry = NEXT_TX(*entry);
6432
6433                         map += frag_len;
6434                 }
6435
6436                 if (len) {
6437                         if (*budget) {
6438                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6439                                               len, flags, mss, vlan);
6440                                 *budget -= 1;
6441                                 *entry = NEXT_TX(*entry);
6442                         } else {
6443                                 hwbug = true;
6444                                 tnapi->tx_buffers[prvidx].fragmented = false;
6445                         }
6446                 }
6447         } else {
6448                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6449                               len, flags, mss, vlan);
6450                 *entry = NEXT_TX(*entry);
6451         }
6452
6453         return hwbug;
6454 }
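/* Editorial worked example of the dma_limit splitting above, assuming
 * tp->dma_limit = 4096 and an 8200-byte mapping: the first pass emits
 * a 4096-byte BD (8200 - 4096 = 4104 left); the second pass would
 * leave exactly 8 bytes (4104 - 4096), so the 8-byte workaround emits
 * 2048 instead, leaving 2056; the final BD then carries those 2056
 * bytes with the caller's original flags (including TXD_FLAG_END).
 */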
6455
6456 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6457 {
6458         int i;
6459         struct sk_buff *skb;
6460         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6461
6462         skb = txb->skb;
6463         txb->skb = NULL;
6464
6465         pci_unmap_single(tnapi->tp->pdev,
6466                          dma_unmap_addr(txb, mapping),
6467                          skb_headlen(skb),
6468                          PCI_DMA_TODEVICE);
6469
6470         while (txb->fragmented) {
6471                 txb->fragmented = false;
6472                 entry = NEXT_TX(entry);
6473                 txb = &tnapi->tx_buffers[entry];
6474         }
6475
6476         for (i = 0; i <= last; i++) {
6477                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6478
6479                 entry = NEXT_TX(entry);
6480                 txb = &tnapi->tx_buffers[entry];
6481
6482                 pci_unmap_page(tnapi->tp->pdev,
6483                                dma_unmap_addr(txb, mapping),
6484                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6485
6486                 while (txb->fragmented) {
6487                         txb->fragmented = false;
6488                         entry = NEXT_TX(entry);
6489                         txb = &tnapi->tx_buffers[entry];
6490                 }
6491         }
6492 }
6493
6494 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6495 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6496                                        struct sk_buff **pskb,
6497                                        u32 *entry, u32 *budget,
6498                                        u32 base_flags, u32 mss, u32 vlan)
6499 {
6500         struct tg3 *tp = tnapi->tp;
6501         struct sk_buff *new_skb, *skb = *pskb;
6502         dma_addr_t new_addr = 0;
6503         int ret = 0;
6504
6505         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6506                 new_skb = skb_copy(skb, GFP_ATOMIC);
6507         else {
6508                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6509
6510                 new_skb = skb_copy_expand(skb,
6511                                           skb_headroom(skb) + more_headroom,
6512                                           skb_tailroom(skb), GFP_ATOMIC);
6513         }
6514
6515         if (!new_skb) {
6516                 ret = -1;
6517         } else {
6518                 /* New SKB is guaranteed to be linear. */
6519                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6520                                           PCI_DMA_TODEVICE);
6521                 /* Make sure the mapping succeeded */
6522                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6523                         dev_kfree_skb(new_skb);
6524                         ret = -1;
6525                 } else {
6526                         u32 save_entry = *entry;
6527
6528                         base_flags |= TXD_FLAG_END;
6529
6530                         tnapi->tx_buffers[*entry].skb = new_skb;
6531                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6532                                            mapping, new_addr);
6533
6534                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6535                                             new_skb->len, base_flags,
6536                                             mss, vlan)) {
6537                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6538                                 dev_kfree_skb(new_skb);
6539                                 ret = -1;
6540                         }
6541                 }
6542         }
6543
6544         dev_kfree_skb(skb);
6545         *pskb = new_skb;
6546         return ret;
6547 }
6548
6549 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6550
6551 /* Use GSO to work around a rare TSO bug that may be triggered when the
6552  * TSO header is greater than 80 bytes.
6553  */
6554 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6555 {
6556         struct sk_buff *segs, *nskb;
6557         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6558
6559         /* Estimate the number of fragments in the worst case */
6560         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6561                 netif_stop_queue(tp->dev);
6562
6563                 /* netif_tx_stop_queue() must be done before checking
6564                  * tx index in tg3_tx_avail() below, because in
6565                  * tg3_tx(), we update tx index before checking for
6566                  * netif_tx_queue_stopped().
6567                  */
6568                 smp_mb();
6569                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6570                         return NETDEV_TX_BUSY;
6571
6572                 netif_wake_queue(tp->dev);
6573         }
6574
6575         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6576         if (IS_ERR(segs))
6577                 goto tg3_tso_bug_end;
6578
6579         do {
6580                 nskb = segs;
6581                 segs = segs->next;
6582                 nskb->next = NULL;
6583                 tg3_start_xmit(nskb, tp->dev);
6584         } while (segs);
6585
6586 tg3_tso_bug_end:
6587         dev_kfree_skb(skb);
6588
6589         return NETDEV_TX_OK;
6590 }
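/* Editorial sketch of the software-GSO fallback pattern used above:
 * drop NETIF_F_TSO from the feature set, let the stack segment the
 * super-packet into MTU-sized skbs, and push each one back through
 * the normal transmit path.  Minimal shape (illustrative, error
 * handling elided):
 *
 *	segs = skb_gso_segment(skb, features & ~NETIF_F_TSO);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		my_xmit(segs, dev);	// hypothetical per-segment xmit
 *		segs = next;
 *	}
 *	dev_kfree_skb(skb);		// the original super-skb is done
 */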
6591
6592 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6593  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6594  */
6595 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6596 {
6597         struct tg3 *tp = netdev_priv(dev);
6598         u32 len, entry, base_flags, mss, vlan = 0;
6599         u32 budget;
6600         int i = -1, would_hit_hwbug;
6601         dma_addr_t mapping;
6602         struct tg3_napi *tnapi;
6603         struct netdev_queue *txq;
6604         unsigned int last;
6605
6606         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6607         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6608         if (tg3_flag(tp, ENABLE_TSS))
6609                 tnapi++;
6610
6611         budget = tg3_tx_avail(tnapi);
6612
6613         /* We are running in BH disabled context with netif_tx_lock
6614          * and TX reclaim runs via tp->napi.poll inside of a software
6615          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6616          * no IRQ context deadlocks to worry about either.  Rejoice!
6617          */
6618         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6619                 if (!netif_tx_queue_stopped(txq)) {
6620                         netif_tx_stop_queue(txq);
6621
6622                         /* This is a hard error, log it. */
6623                         netdev_err(dev,
6624                                    "BUG! Tx Ring full when queue awake!\n");
6625                 }
6626                 return NETDEV_TX_BUSY;
6627         }
6628
6629         entry = tnapi->tx_prod;
6630         base_flags = 0;
6631         if (skb->ip_summed == CHECKSUM_PARTIAL)
6632                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6633
6634         mss = skb_shinfo(skb)->gso_size;
6635         if (mss) {
6636                 struct iphdr *iph;
6637                 u32 tcp_opt_len, hdr_len;
6638
6639                 if (skb_header_cloned(skb) &&
6640                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6641                         goto drop;
6642
6643                 iph = ip_hdr(skb);
6644                 tcp_opt_len = tcp_optlen(skb);
6645
6646                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6647
6648                 if (!skb_is_gso_v6(skb)) {
6649                         iph->check = 0;
6650                         iph->tot_len = htons(mss + hdr_len);
6651                 }
6652
6653                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6654                     tg3_flag(tp, TSO_BUG))
6655                         return tg3_tso_bug(tp, skb);
6656
6657                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6658                                TXD_FLAG_CPU_POST_DMA);
6659
6660                 if (tg3_flag(tp, HW_TSO_1) ||
6661                     tg3_flag(tp, HW_TSO_2) ||
6662                     tg3_flag(tp, HW_TSO_3)) {
6663                         tcp_hdr(skb)->check = 0;
6664                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6665                 } else
6666                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6667                                                                  iph->daddr, 0,
6668                                                                  IPPROTO_TCP,
6669                                                                  0);
6670
6671                 if (tg3_flag(tp, HW_TSO_3)) {
6672                         mss |= (hdr_len & 0xc) << 12;
6673                         if (hdr_len & 0x10)
6674                                 base_flags |= 0x00000010;
6675                         base_flags |= (hdr_len & 0x3e0) << 5;
6676                 } else if (tg3_flag(tp, HW_TSO_2))
6677                         mss |= hdr_len << 9;
6678                 else if (tg3_flag(tp, HW_TSO_1) ||
6679                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6680                         if (tcp_opt_len || iph->ihl > 5) {
6681                                 int tsflags;
6682
6683                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6684                                 mss |= (tsflags << 11);
6685                         }
6686                 } else {
6687                         if (tcp_opt_len || iph->ihl > 5) {
6688                                 int tsflags;
6689
6690                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6691                                 base_flags |= tsflags << 12;
6692                         }
6693                 }
6694         }
6695
6696         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6697             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6698                 base_flags |= TXD_FLAG_JMB_PKT;
6699
6700         if (vlan_tx_tag_present(skb)) {
6701                 base_flags |= TXD_FLAG_VLAN;
6702                 vlan = vlan_tx_tag_get(skb);
6703         }
6704
6705         len = skb_headlen(skb);
6706
6707         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6708         if (pci_dma_mapping_error(tp->pdev, mapping))
6709                 goto drop;
6710
6712         tnapi->tx_buffers[entry].skb = skb;
6713         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6714
6715         would_hit_hwbug = 0;
6716
6717         if (tg3_flag(tp, 5701_DMA_BUG))
6718                 would_hit_hwbug = 1;
6719
6720         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6721                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6722                             mss, vlan)) {
6723                 would_hit_hwbug = 1;
6724         /* Now loop through additional data fragments, and queue them. */
6725         } else if (skb_shinfo(skb)->nr_frags > 0) {
6726                 u32 tmp_mss = mss;
6727
6728                 if (!tg3_flag(tp, HW_TSO_1) &&
6729                     !tg3_flag(tp, HW_TSO_2) &&
6730                     !tg3_flag(tp, HW_TSO_3))
6731                         tmp_mss = 0;
6732
6733                 last = skb_shinfo(skb)->nr_frags - 1;
6734                 for (i = 0; i <= last; i++) {
6735                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6736
6737                         len = skb_frag_size(frag);
6738                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6739                                                    len, DMA_TO_DEVICE);
6740
6741                         tnapi->tx_buffers[entry].skb = NULL;
6742                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6743                                            mapping);
6744                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6745                                 goto dma_error;
6746
6747                         if (!budget ||
6748                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6749                                             len, base_flags |
6750                                             ((i == last) ? TXD_FLAG_END : 0),
6751                                             tmp_mss, vlan)) {
6752                                 would_hit_hwbug = 1;
6753                                 break;
6754                         }
6755                 }
6756         }
6757
6758         if (would_hit_hwbug) {
6759                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6760
6761                 /* If the workaround fails due to memory/mapping
6762                  * failure, silently drop this packet.
6763                  */
6764                 entry = tnapi->tx_prod;
6765                 budget = tg3_tx_avail(tnapi);
6766                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6767                                                 base_flags, mss, vlan))
6768                         goto drop_nofree;
6769         }
6770
6771         skb_tx_timestamp(skb);
6772         netdev_sent_queue(tp->dev, skb->len);
6773
6774         /* Packets are ready, update Tx producer idx local and on card. */
6775         tw32_tx_mbox(tnapi->prodmbox, entry);
6776
6777         tnapi->tx_prod = entry;
6778         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6779                 netif_tx_stop_queue(txq);
6780
6781                 /* netif_tx_stop_queue() must be done before checking
6782                  * tx index in tg3_tx_avail() below, because in
6783                  * tg3_tx(), we update tx index before checking for
6784                  * netif_tx_queue_stopped().
6785                  */
6786                 smp_mb();
6787                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6788                         netif_tx_wake_queue(txq);
6789         }
6790
6791         mmiowb();
6792         return NETDEV_TX_OK;
6793
6794 dma_error:
6795         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6796         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6797 drop:
6798         dev_kfree_skb(skb);
6799 drop_nofree:
6800         tp->tx_dropped++;
6801         return NETDEV_TX_OK;
6802 }
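/* Editorial note on the stop/wake dance in tg3_start_xmit(): the
 * smp_mb() after netif_tx_stop_queue() pairs with the ordering in the
 * tg3_tx() completion path, which publishes the new tx consumer index
 * before testing netif_tx_queue_stopped().  Without that pairing both
 * sides could read each other's stale state and leave the queue
 * stopped with free descriptors.  Roughly:
 *
 *	producer (xmit)			consumer (tg3_tx)
 *	---------------			-----------------
 *	netif_tx_stop_queue()		update tx_cons
 *	smp_mb()			smp_mb()
 *	recheck tg3_tx_avail()		recheck queue_stopped()
 *	wake if space appeared		wake if stopped and space
 */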
6803
6804 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6805 {
6806         if (enable) {
6807                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6808                                   MAC_MODE_PORT_MODE_MASK);
6809
6810                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6811
6812                 if (!tg3_flag(tp, 5705_PLUS))
6813                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6814
6815                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6816                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6817                 else
6818                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6819         } else {
6820                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6821
6822                 if (tg3_flag(tp, 5705_PLUS) ||
6823                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6824                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6825                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6826         }
6827
6828         tw32(MAC_MODE, tp->mac_mode);
6829         udelay(40);
6830 }
6831
6832 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6833 {
6834         u32 val, bmcr, mac_mode, ptest = 0;
6835
6836         tg3_phy_toggle_apd(tp, false);
6837         tg3_phy_toggle_automdix(tp, 0);
6838
6839         if (extlpbk && tg3_phy_set_extloopbk(tp))
6840                 return -EIO;
6841
6842         bmcr = BMCR_FULLDPLX;
6843         switch (speed) {
6844         case SPEED_10:
6845                 break;
6846         case SPEED_100:
6847                 bmcr |= BMCR_SPEED100;
6848                 break;
6849         case SPEED_1000:
6850         default:
6851                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6852                         speed = SPEED_100;
6853                         bmcr |= BMCR_SPEED100;
6854                 } else {
6855                         speed = SPEED_1000;
6856                         bmcr |= BMCR_SPEED1000;
6857                 }
6858         }
6859
6860         if (extlpbk) {
6861                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6862                         tg3_readphy(tp, MII_CTRL1000, &val);
6863                         val |= CTL1000_AS_MASTER |
6864                                CTL1000_ENABLE_MASTER;
6865                         tg3_writephy(tp, MII_CTRL1000, val);
6866                 } else {
6867                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6868                                 MII_TG3_FET_PTEST_TRIM_2;
6869                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6870                 }
6871         } else
6872                 bmcr |= BMCR_LOOPBACK;
6873
6874         tg3_writephy(tp, MII_BMCR, bmcr);
6875
6876         /* The write needs to be flushed for the FETs */
6877         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6878                 tg3_readphy(tp, MII_BMCR, &bmcr);
6879
6880         udelay(40);
6881
6882         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6883             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6884                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6885                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6886                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6887
6888                 /* The write needs to be flushed for the AC131 */
6889                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6890         }
6891
6892         /* Reset to prevent intermittently losing the 1st rx packet */
6893         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6894             tg3_flag(tp, 5780_CLASS)) {
6895                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6896                 udelay(10);
6897                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6898         }
6899
6900         mac_mode = tp->mac_mode &
6901                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6902         if (speed == SPEED_1000)
6903                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6904         else
6905                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6906
6907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6908                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6909
6910                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6911                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6912                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6913                         mac_mode |= MAC_MODE_LINK_POLARITY;
6914
6915                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6916                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6917         }
6918
6919         tw32(MAC_MODE, mac_mode);
6920         udelay(40);
6921
6922         return 0;
6923 }
6924
6925 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6926 {
6927         struct tg3 *tp = netdev_priv(dev);
6928
6929         if (features & NETIF_F_LOOPBACK) {
6930                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6931                         return;
6932
6933                 spin_lock_bh(&tp->lock);
6934                 tg3_mac_loopback(tp, true);
6935                 netif_carrier_on(tp->dev);
6936                 spin_unlock_bh(&tp->lock);
6937                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6938         } else {
6939                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6940                         return;
6941
6942                 spin_lock_bh(&tp->lock);
6943                 tg3_mac_loopback(tp, false);
6944                 /* Force link status check */
6945                 tg3_setup_phy(tp, 1);
6946                 spin_unlock_bh(&tp->lock);
6947                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6948         }
6949 }
6950
6951 static netdev_features_t tg3_fix_features(struct net_device *dev,
6952         netdev_features_t features)
6953 {
6954         struct tg3 *tp = netdev_priv(dev);
6955
6956         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6957                 features &= ~NETIF_F_ALL_TSO;
6958
6959         return features;
6960 }
6961
6962 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6963 {
6964         netdev_features_t changed = dev->features ^ features;
6965
6966         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6967                 tg3_set_loopback(dev, features);
6968
6969         return 0;
6970 }
6971
6972 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6973                                int new_mtu)
6974 {
6975         dev->mtu = new_mtu;
6976
6977         if (new_mtu > ETH_DATA_LEN) {
6978                 if (tg3_flag(tp, 5780_CLASS)) {
6979                         netdev_update_features(dev);
6980                         tg3_flag_clear(tp, TSO_CAPABLE);
6981                 } else {
6982                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6983                 }
6984         } else {
6985                 if (tg3_flag(tp, 5780_CLASS)) {
6986                         tg3_flag_set(tp, TSO_CAPABLE);
6987                         netdev_update_features(dev);
6988                 }
6989                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6990         }
6991 }
6992
6993 static int tg3_restart_hw(struct tg3 *tp, int reset_phy);
6994
6995 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6996 {
6997         struct tg3 *tp = netdev_priv(dev);
6998         int err;
6999
7000         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7001                 return -EINVAL;
7002
7003         if (!netif_running(dev)) {
7004                 /* We'll just catch it later when the
7005                  * device is brought up.
7006                  */
7007                 tg3_set_mtu(dev, tp, new_mtu);
7008                 return 0;
7009         }
7010
7011         tg3_phy_stop(tp);
7012
7013         tg3_netif_stop(tp);
7014
7015         tg3_full_lock(tp, 1);
7016
7017         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7018
7019         tg3_set_mtu(dev, tp, new_mtu);
7020
7021         err = tg3_restart_hw(tp, 0);
7022
7023         if (!err)
7024                 tg3_netif_start(tp);
7025
7026         tg3_full_unlock(tp);
7027
7028         if (!err)
7029                 tg3_phy_start(tp);
7030
7031         return err;
7032 }
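/* Editorial note: tg3_change_mtu() follows the driver's standard
 * reconfigure sequence -- stop PHY processing, freeze the data path,
 * take the full lock with IRQ quiesce, halt and re-init the hardware
 * with the new parameters, then restart everything in reverse order.
 * tg3_reset_task() earlier in this file has the same shape.
 */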
7033
7034 static void tg3_rx_prodring_free(struct tg3 *tp,
7035                                  struct tg3_rx_prodring_set *tpr)
7036 {
7037         int i;
7038
7039         if (tpr != &tp->napi[0].prodring) {
7040                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7041                      i = (i + 1) & tp->rx_std_ring_mask)
7042                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7043                                         tp->rx_pkt_map_sz);
7044
7045                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7046                         for (i = tpr->rx_jmb_cons_idx;
7047                              i != tpr->rx_jmb_prod_idx;
7048                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7049                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7050                                                 TG3_RX_JMB_MAP_SZ);
7051                         }
7052                 }
7053
7054                 return;
7055         }
7056
7057         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7058                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7059                                 tp->rx_pkt_map_sz);
7060
7061         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7062                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7063                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7064                                         TG3_RX_JMB_MAP_SZ);
7065         }
7066 }
7067
7068 /* Initialize rx rings for packet processing.
7069  *
7070  * The chip has been shut down and the driver detached from
7071  * the networking stack, so no interrupts or new tx packets
7072  * will end up in the driver.  tp->{tx,}lock are held and thus
7073  * we may not sleep.
7074  */
7075 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7076                                  struct tg3_rx_prodring_set *tpr)
7077 {
7078         u32 i, rx_pkt_dma_sz;
7079
7080         tpr->rx_std_cons_idx = 0;
7081         tpr->rx_std_prod_idx = 0;
7082         tpr->rx_jmb_cons_idx = 0;
7083         tpr->rx_jmb_prod_idx = 0;
7084
7085         if (tpr != &tp->napi[0].prodring) {
7086                 memset(&tpr->rx_std_buffers[0], 0,
7087                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7088                 if (tpr->rx_jmb_buffers)
7089                         memset(&tpr->rx_jmb_buffers[0], 0,
7090                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7091                 goto done;
7092         }
7093
7094         /* Zero out all descriptors. */
7095         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7096
7097         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7098         if (tg3_flag(tp, 5780_CLASS) &&
7099             tp->dev->mtu > ETH_DATA_LEN)
7100                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7101         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7102
7103         /* Initialize invariants of the rings; we only set this
7104          * stuff once.  This works because the card does not
7105          * write into the rx buffer posting rings.
7106          */
7107         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7108                 struct tg3_rx_buffer_desc *rxd;
7109
7110                 rxd = &tpr->rx_std[i];
7111                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7112                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7113                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7114                                (i << RXD_OPAQUE_INDEX_SHIFT));
7115         }
7116
7117         /* Now allocate fresh SKBs for each rx ring. */
7118         for (i = 0; i < tp->rx_pending; i++) {
7119                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7120                         netdev_warn(tp->dev,
7121                                     "Using a smaller RX standard ring. Only "
7122                                     "%d out of %d buffers were allocated "
7123                                     "successfully\n", i, tp->rx_pending);
7124                         if (i == 0)
7125                                 goto initfail;
7126                         tp->rx_pending = i;
7127                         break;
7128                 }
7129         }
7130
7131         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7132                 goto done;
7133
7134         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7135
7136         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7137                 goto done;
7138
7139         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7140                 struct tg3_rx_buffer_desc *rxd;
7141
7142                 rxd = &tpr->rx_jmb[i].std;
7143                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7144                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7145                                   RXD_FLAG_JUMBO;
7146                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7147                        (i << RXD_OPAQUE_INDEX_SHIFT));
7148         }
7149
7150         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7151                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7152                         netdev_warn(tp->dev,
7153                                     "Using a smaller RX jumbo ring. Only %d "
7154                                     "out of %d buffers were allocated "
7155                                     "successfully\n", i, tp->rx_jumbo_pending);
7156                         if (i == 0)
7157                                 goto initfail;
7158                         tp->rx_jumbo_pending = i;
7159                         break;
7160                 }
7161         }
7162
7163 done:
7164         return 0;
7165
7166 initfail:
7167         tg3_rx_prodring_free(tp, tpr);
7168         return -ENOMEM;
7169 }
7170
7171 static void tg3_rx_prodring_fini(struct tg3 *tp,
7172                                  struct tg3_rx_prodring_set *tpr)
7173 {
7174         kfree(tpr->rx_std_buffers);
7175         tpr->rx_std_buffers = NULL;
7176         kfree(tpr->rx_jmb_buffers);
7177         tpr->rx_jmb_buffers = NULL;
7178         if (tpr->rx_std) {
7179                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7180                                   tpr->rx_std, tpr->rx_std_mapping);
7181                 tpr->rx_std = NULL;
7182         }
7183         if (tpr->rx_jmb) {
7184                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7185                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7186                 tpr->rx_jmb = NULL;
7187         }
7188 }
7189
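/* Each producer ring needs two allocations: a kzalloc()ed host-side
 * bookkeeping array (rx_*_buffers) and a DMA-coherent descriptor ring
 * shared with the NIC.  Jumbo resources exist only on jumbo-capable,
 * non-5780-class parts.
 */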
7190 static int tg3_rx_prodring_init(struct tg3 *tp,
7191                                 struct tg3_rx_prodring_set *tpr)
7192 {
7193         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7194                                       GFP_KERNEL);
7195         if (!tpr->rx_std_buffers)
7196                 return -ENOMEM;
7197
7198         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7199                                          TG3_RX_STD_RING_BYTES(tp),
7200                                          &tpr->rx_std_mapping,
7201                                          GFP_KERNEL);
7202         if (!tpr->rx_std)
7203                 goto err_out;
7204
7205         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7206                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7207                                               GFP_KERNEL);
7208                 if (!tpr->rx_jmb_buffers)
7209                         goto err_out;
7210
7211                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7212                                                  TG3_RX_JMB_RING_BYTES(tp),
7213                                                  &tpr->rx_jmb_mapping,
7214                                                  GFP_KERNEL);
7215                 if (!tpr->rx_jmb)
7216                         goto err_out;
7217         }
7218
7219         return 0;
7220
7221 err_out:
7222         tg3_rx_prodring_fini(tp, tpr);
7223         return -ENOMEM;
7224 }
7225
7226 /* Free up pending packets in all rx/tx rings.
7227  *
7228  * The chip has been shut down and the driver detached from
7229  * the networking stack, so no interrupts or new tx packets
7230  * will end up in the driver.  tp->{tx,}lock is not held and we are not
7231  * in an interrupt context and thus may sleep.
7232  */
7233 static void tg3_free_rings(struct tg3 *tp)
7234 {
7235         int i, j;
7236
7237         for (j = 0; j < tp->irq_cnt; j++) {
7238                 struct tg3_napi *tnapi = &tp->napi[j];
7239
7240                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7241
7242                 if (!tnapi->tx_buffers)
7243                         continue;
7244
7245                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7246                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7247
7248                         if (!skb)
7249                                 continue;
7250
7251                         tg3_tx_skb_unmap(tnapi, i,
7252                                          skb_shinfo(skb)->nr_frags - 1);
7253
7254                         dev_kfree_skb_any(skb);
7255                 }
7256         }
7257         netdev_reset_queue(tp->dev);
7258 }
7259
7260 /* Initialize tx/rx rings for packet processing.
7261  *
7262  * The chip has been shut down and the driver detached from
7263  * the networking stack, so no interrupts or new tx packets
7264  * will end up in the driver.  tp->{tx,}lock are held and thus
7265  * we may not sleep.
7266  */
7267 static int tg3_init_rings(struct tg3 *tp)
7268 {
7269         int i;
7270
7271         /* Free up all the SKBs. */
7272         tg3_free_rings(tp);
7273
7274         for (i = 0; i < tp->irq_cnt; i++) {
7275                 struct tg3_napi *tnapi = &tp->napi[i];
7276
7277                 tnapi->last_tag = 0;
7278                 tnapi->last_irq_tag = 0;
7279                 tnapi->hw_status->status = 0;
7280                 tnapi->hw_status->status_tag = 0;
7281                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7282
7283                 tnapi->tx_prod = 0;
7284                 tnapi->tx_cons = 0;
7285                 if (tnapi->tx_ring)
7286                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7287
7288                 tnapi->rx_rcb_ptr = 0;
7289                 if (tnapi->rx_rcb)
7290                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7291
7292                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7293                         tg3_free_rings(tp);
7294                         return -ENOMEM;
7295                 }
7296         }
7297
7298         return 0;
7299 }
7300
7301 /*
7302  * Must not be invoked with interrupt sources disabled and
7303  * the hardware shut down.
7304  */
7305 static void tg3_free_consistent(struct tg3 *tp)
7306 {
7307         int i;
7308
7309         for (i = 0; i < tp->irq_cnt; i++) {
7310                 struct tg3_napi *tnapi = &tp->napi[i];
7311
7312                 if (tnapi->tx_ring) {
7313                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7314                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7315                         tnapi->tx_ring = NULL;
7316                 }
7317
7318                 kfree(tnapi->tx_buffers);
7319                 tnapi->tx_buffers = NULL;
7320
7321                 if (tnapi->rx_rcb) {
7322                         dma_free_coherent(&tp->pdev->dev,
7323                                           TG3_RX_RCB_RING_BYTES(tp),
7324                                           tnapi->rx_rcb,
7325                                           tnapi->rx_rcb_mapping);
7326                         tnapi->rx_rcb = NULL;
7327                 }
7328
7329                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7330
7331                 if (tnapi->hw_status) {
7332                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7333                                           tnapi->hw_status,
7334                                           tnapi->status_mapping);
7335                         tnapi->hw_status = NULL;
7336                 }
7337         }
7338
7339         if (tp->hw_stats) {
7340                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7341                                   tp->hw_stats, tp->stats_mapping);
7342                 tp->hw_stats = NULL;
7343         }
7344 }
7345
7346 /*
7347  * Must not be invoked with interrupt sources disabled and
7348  * the hardware shut down.  Can sleep.
7349  */
7350 static int tg3_alloc_consistent(struct tg3 *tp)
7351 {
7352         int i;
7353
7354         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7355                                           sizeof(struct tg3_hw_stats),
7356                                           &tp->stats_mapping,
7357                                           GFP_KERNEL);
7358         if (!tp->hw_stats)
7359                 goto err_out;
7360
7361         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7362
7363         for (i = 0; i < tp->irq_cnt; i++) {
7364                 struct tg3_napi *tnapi = &tp->napi[i];
7365                 struct tg3_hw_status *sblk;
7366
7367                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7368                                                       TG3_HW_STATUS_SIZE,
7369                                                       &tnapi->status_mapping,
7370                                                       GFP_KERNEL);
7371                 if (!tnapi->hw_status)
7372                         goto err_out;
7373
7374                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7375                 sblk = tnapi->hw_status;
7376
7377                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7378                         goto err_out;
7379
7380                 /* If multivector TSS is enabled, vector 0 does not handle
7381                  * tx interrupts.  Don't allocate any resources for it.
7382                  */
7383                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7384                     (i && tg3_flag(tp, ENABLE_TSS))) {
7385                         tnapi->tx_buffers = kzalloc(
7386                                                sizeof(struct tg3_tx_ring_info) *
7387                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7388                         if (!tnapi->tx_buffers)
7389                                 goto err_out;
7390
7391                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7392                                                             TG3_TX_RING_BYTES,
7393                                                         &tnapi->tx_desc_mapping,
7394                                                             GFP_KERNEL);
7395                         if (!tnapi->tx_ring)
7396                                 goto err_out;
7397                 }
7398
7399                 /*
7400                  * When RSS is enabled, the status block format changes
7401                  * slightly.  The "rx_jumbo_consumer", "reserved",
7402                  * and "rx_mini_consumer" members get mapped to the
7403                  * other three rx return ring producer indexes.
7404                  */
7405                 switch (i) {
7406                 default:
7407                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7408                         break;
7409                 case 2:
7410                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7411                         break;
7412                 case 3:
7413                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7414                         break;
7415                 case 4:
7416                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7417                         break;
7418                 }
7419
7420                 /*
7421                  * If multivector RSS is enabled, vector 0 does not handle
7422                  * rx or tx interrupts.  Don't allocate any resources for it.
7423                  */
7424                 if (!i && tg3_flag(tp, ENABLE_RSS))
7425                         continue;
7426
7427                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7428                                                    TG3_RX_RCB_RING_BYTES(tp),
7429                                                    &tnapi->rx_rcb_mapping,
7430                                                    GFP_KERNEL);
7431                 if (!tnapi->rx_rcb)
7432                         goto err_out;
7433
7434                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7435         }
7436
7437         return 0;
7438
7439 err_out:
7440         tg3_free_consistent(tp);
7441         return -ENOMEM;
7442 }
7443
7444 #define MAX_WAIT_CNT 1000
7445
7446 /* To stop a block, clear the enable bit and poll until it
7447  * clears (at most MAX_WAIT_CNT * 100us = 100ms).  tp->lock is held.
7448  */
7449 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7450 {
7451         unsigned int i;
7452         u32 val;
7453
7454         if (tg3_flag(tp, 5705_PLUS)) {
7455                 switch (ofs) {
7456                 case RCVLSC_MODE:
7457                 case DMAC_MODE:
7458                 case MBFREE_MODE:
7459                 case BUFMGR_MODE:
7460                 case MEMARB_MODE:
7461                         /* We can't enable/disable these bits on the
7462                          * 5705/5750, so just report success.
7463                          */
7464                         return 0;
7465
7466                 default:
7467                         break;
7468                 }
7469         }
7470
7471         val = tr32(ofs);
7472         val &= ~enable_bit;
7473         tw32_f(ofs, val);
7474
7475         for (i = 0; i < MAX_WAIT_CNT; i++) {
7476                 udelay(100);
7477                 val = tr32(ofs);
7478                 if ((val & enable_bit) == 0)
7479                         break;
7480         }
7481
7482         if (i == MAX_WAIT_CNT && !silent) {
7483                 dev_err(&tp->pdev->dev,
7484                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7485                         ofs, enable_bit);
7486                 return -ENODEV;
7487         }
7488
7489         return 0;
7490 }
7491
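/* Quiesce the chip in data-flow order: the receive blocks first, then
 * the send blocks, then the MAC TX engine, and finally host coalescing,
 * DMA and the memory arbiter.  Individual tg3_stop_block() failures are
 * OR'd into err so one timeout does not abort the rest of the teardown.
 */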
7492 /* tp->lock is held. */
7493 static int tg3_abort_hw(struct tg3 *tp, int silent)
7494 {
7495         int i, err;
7496
7497         tg3_disable_ints(tp);
7498
7499         tp->rx_mode &= ~RX_MODE_ENABLE;
7500         tw32_f(MAC_RX_MODE, tp->rx_mode);
7501         udelay(10);
7502
7503         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7504         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7505         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7506         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7507         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7508         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7509
7510         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7511         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7512         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7513         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7514         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7515         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7516         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7517
7518         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7519         tw32_f(MAC_MODE, tp->mac_mode);
7520         udelay(40);
7521
7522         tp->tx_mode &= ~TX_MODE_ENABLE;
7523         tw32_f(MAC_TX_MODE, tp->tx_mode);
7524
7525         for (i = 0; i < MAX_WAIT_CNT; i++) {
7526                 udelay(100);
7527                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7528                         break;
7529         }
7530         if (i >= MAX_WAIT_CNT) {
7531                 dev_err(&tp->pdev->dev,
7532                         "%s timed out, TX_MODE_ENABLE will not clear "
7533                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7534                 err |= -ENODEV;
7535         }
7536
7537         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7538         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7539         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7540
7541         tw32(FTQ_RESET, 0xffffffff);
7542         tw32(FTQ_RESET, 0x00000000);
7543
7544         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7545         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7546
7547         for (i = 0; i < tp->irq_cnt; i++) {
7548                 struct tg3_napi *tnapi = &tp->napi[i];
7549                 if (tnapi->hw_status)
7550                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7551         }
7552
7553         return err;
7554 }
7555
7556 /* Save PCI command register before chip reset */
7557 static void tg3_save_pci_state(struct tg3 *tp)
7558 {
7559         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7560 }
7561
7562 /* Restore PCI state after chip reset */
7563 static void tg3_restore_pci_state(struct tg3 *tp)
7564 {
7565         u32 val;
7566
7567         /* Re-enable indirect register accesses. */
7568         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7569                                tp->misc_host_ctrl);
7570
7571         /* Set MAX PCI retry to zero. */
7572         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7573         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7574             tg3_flag(tp, PCIX_MODE))
7575                 val |= PCISTATE_RETRY_SAME_DMA;
7576         /* Allow reads and writes to the APE register and memory space. */
7577         if (tg3_flag(tp, ENABLE_APE))
7578                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7579                        PCISTATE_ALLOW_APE_SHMEM_WR |
7580                        PCISTATE_ALLOW_APE_PSPACE_WR;
7581         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7582
7583         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7584
7585         if (!tg3_flag(tp, PCI_EXPRESS)) {
7586                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7587                                       tp->pci_cacheline_sz);
7588                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7589                                       tp->pci_lat_timer);
7590         }
7591
7592         /* Make sure PCI-X relaxed ordering bit is clear. */
7593         if (tg3_flag(tp, PCIX_MODE)) {
7594                 u16 pcix_cmd;
7595
7596                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7597                                      &pcix_cmd);
7598                 pcix_cmd &= ~PCI_X_CMD_ERO;
7599                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7600                                       pcix_cmd);
7601         }
7602
7603         if (tg3_flag(tp, 5780_CLASS)) {
7604
7605                 /* Chip reset on 5780 will reset the MSI enable bit,
7606                  * so we need to restore it.
7607                  */
7608                 if (tg3_flag(tp, USING_MSI)) {
7609                         u16 ctrl;
7610
7611                         pci_read_config_word(tp->pdev,
7612                                              tp->msi_cap + PCI_MSI_FLAGS,
7613                                              &ctrl);
7614                         pci_write_config_word(tp->pdev,
7615                                               tp->msi_cap + PCI_MSI_FLAGS,
7616                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7617                         val = tr32(MSGINT_MODE);
7618                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7619                 }
7620         }
7621 }
7622
7623 /* tp->lock is held. */
7624 static int tg3_chip_reset(struct tg3 *tp)
7625 {
7626         u32 val;
7627         void (*write_op)(struct tg3 *, u32, u32);
7628         int i, err;
7629
7630         tg3_nvram_lock(tp);
7631
7632         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7633
7634         /* No matching tg3_nvram_unlock() after this because
7635          * chip reset below will undo the nvram lock.
7636          */
7637         tp->nvram_lock_cnt = 0;
7638
7639         /* GRC_MISC_CFG core clock reset will clear the memory
7640          * enable bit in PCI register 4 and the MSI enable bit
7641          * on some chips, so we save relevant registers here.
7642          */
7643         tg3_save_pci_state(tp);
7644
7645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7646             tg3_flag(tp, 5755_PLUS))
7647                 tw32(GRC_FASTBOOT_PC, 0);
7648
7649         /*
7650          * We must avoid the readl() that normally takes place.
7651          * It locks up machines, causes machine checks and other
7652          * fun things.  So, temporarily disable the 5701
7653          * hardware workaround, while we do the reset.
7654          */
7655         write_op = tp->write32;
7656         if (write_op == tg3_write_flush_reg32)
7657                 tp->write32 = tg3_write32;
7658
7659         /* Prevent the irq handler from reading or writing PCI registers
7660          * during chip reset when the memory enable bit in the PCI command
7661          * register may be cleared.  The chip does not generate interrupts
7662          * at this time, but the irq handler may still be called due to irq
7663          * sharing or irqpoll.
7664          */
7665         tg3_flag_set(tp, CHIP_RESETTING);
7666         for (i = 0; i < tp->irq_cnt; i++) {
7667                 struct tg3_napi *tnapi = &tp->napi[i];
7668                 if (tnapi->hw_status) {
7669                         tnapi->hw_status->status = 0;
7670                         tnapi->hw_status->status_tag = 0;
7671                 }
7672                 tnapi->last_tag = 0;
7673                 tnapi->last_irq_tag = 0;
7674         }
7675         smp_mb();
7676
7677         for (i = 0; i < tp->irq_cnt; i++)
7678                 synchronize_irq(tp->napi[i].irq_vec);
7679
7680         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7681                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7682                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7683         }
7684
7685         /* do the reset */
7686         val = GRC_MISC_CFG_CORECLK_RESET;
7687
7688         if (tg3_flag(tp, PCI_EXPRESS)) {
7689                 /* Force PCIe 1.0a mode */
7690                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7691                     !tg3_flag(tp, 57765_PLUS) &&
7692                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7693                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7694                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7695
7696                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7697                         tw32(GRC_MISC_CFG, (1 << 29));
7698                         val |= (1 << 29);
7699                 }
7700         }
7701
7702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7703                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7704                 tw32(GRC_VCPU_EXT_CTRL,
7705                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7706         }
7707
7708         /* Manage gphy power for all CPMU-absent PCIe devices. */
7709         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7710                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7711
7712         tw32(GRC_MISC_CFG, val);
7713
7714         /* restore 5701 hardware bug workaround write method */
7715         tp->write32 = write_op;
7716
7717         /* Unfortunately, we have to delay before the PCI read back.
7718          * Some 575X chips will not even respond to a PCI cfg access
7719          * when the reset command is given to the chip.
7720          *
7721          * How do these hardware designers expect things to work
7722          * properly if the PCI write is posted for a long period
7723          * of time?  It is always necessary to have some method by
7724          * which a register read back can occur to push out the
7725          * write that performs the reset.
7726          *
7727          * For most tg3 variants the trick below has worked.
7728          * Ho hum...
7729          */
7730         udelay(120);
7731
7732         /* Flush PCI posted writes.  The normal MMIO registers
7733          * are inaccessible at this time so this is the only
7734          * way to do this reliably (actually, this is no longer
7735          * the case, see above).  I tried to use indirect
7736          * register read/write but this upset some 5701 variants.
7737          */
7738         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7739
7740         udelay(120);
7741
7742         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7743                 u16 val16;
7744
7745                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7746                         int i;
7747                         u32 cfg_val;
7748
7749                         /* Wait for link training to complete.  */
7750                         for (i = 0; i < 5000; i++)
7751                                 udelay(100);
7752
7753                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7754                         pci_write_config_dword(tp->pdev, 0xc4,
7755                                                cfg_val | (1 << 15));
7756                 }
7757
7758                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7759                 pci_read_config_word(tp->pdev,
7760                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7761                                      &val16);
7762                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7763                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7764                 /*
7765                  * Older PCIe devices only support the 128-byte
7766                  * MPS setting.  Enforce the restriction.
7767                  */
7768                 if (!tg3_flag(tp, CPMU_PRESENT))
7769                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7770                 pci_write_config_word(tp->pdev,
7771                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7772                                       val16);
7773
7774                 /* Clear error status */
7775                 pci_write_config_word(tp->pdev,
7776                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7777                                       PCI_EXP_DEVSTA_CED |
7778                                       PCI_EXP_DEVSTA_NFED |
7779                                       PCI_EXP_DEVSTA_FED |
7780                                       PCI_EXP_DEVSTA_URD);
7781         }
7782
7783         tg3_restore_pci_state(tp);
7784
7785         tg3_flag_clear(tp, CHIP_RESETTING);
7786         tg3_flag_clear(tp, ERROR_PROCESSED);
7787
7788         val = 0;
7789         if (tg3_flag(tp, 5780_CLASS))
7790                 val = tr32(MEMARB_MODE);
7791         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7792
7793         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7794                 tg3_stop_fw(tp);
7795                 tw32(0x5000, 0x400);
7796         }
7797
7798         tw32(GRC_MODE, tp->grc_mode);
7799
7800         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7801                 val = tr32(0xc4);
7802
7803                 tw32(0xc4, val | (1 << 15));
7804         }
7805
7806         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7807             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7808                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7809                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7810                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7811                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7812         }
7813
7814         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7815                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7816                 val = tp->mac_mode;
7817         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7818                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7819                 val = tp->mac_mode;
7820         } else
7821                 val = 0;
7822
7823         tw32_f(MAC_MODE, val);
7824         udelay(40);
7825
7826         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7827
7828         err = tg3_poll_fw(tp);
7829         if (err)
7830                 return err;
7831
7832         tg3_mdio_start(tp);
7833
7834         if (tg3_flag(tp, PCI_EXPRESS) &&
7835             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7836             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7837             !tg3_flag(tp, 57765_PLUS)) {
7838                 val = tr32(0x7c00);
7839
7840                 tw32(0x7c00, val | (1 << 25));
7841         }
7842
7843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7844                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7845                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7846         }
7847
7848         /* Reprobe ASF enable state.  */
7849         tg3_flag_clear(tp, ENABLE_ASF);
7850         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7851         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7852         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7853                 u32 nic_cfg;
7854
7855                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7856                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7857                         tg3_flag_set(tp, ENABLE_ASF);
7858                         tp->last_event_jiffies = jiffies;
7859                         if (tg3_flag(tp, 5750_PLUS))
7860                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7861                 }
7862         }
7863
7864         return 0;
7865 }
7866
7867 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7868                                                  struct rtnl_link_stats64 *);
7869 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7870                                                 struct tg3_ethtool_stats *);
7871
7872 /* tp->lock is held. */
7873 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7874 {
7875         int err;
7876
7877         tg3_stop_fw(tp);
7878
7879         tg3_write_sig_pre_reset(tp, kind);
7880
7881         tg3_abort_hw(tp, silent);
7882         err = tg3_chip_reset(tp);
7883
7884         __tg3_set_mac_addr(tp, 0);
7885
7886         tg3_write_sig_legacy(tp, kind);
7887         tg3_write_sig_post_reset(tp, kind);
7888
7889         if (tp->hw_stats) {
7890                 /* Save the stats across chip resets... */
7891                 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
7892                 tg3_get_estats(tp, &tp->estats_prev);
7893
7894                 /* And make sure the next sample is new data */
7895                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7896         }
7897
7898         if (err)
7899                 return err;
7900
7901         return 0;
7902 }
7903
7904 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7905 {
7906         struct tg3 *tp = netdev_priv(dev);
7907         struct sockaddr *addr = p;
7908         int err = 0, skip_mac_1 = 0;
7909
7910         if (!is_valid_ether_addr(addr->sa_data))
7911                 return -EINVAL;
7912
7913         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7914
7915         if (!netif_running(dev))
7916                 return 0;
7917
7918         if (tg3_flag(tp, ENABLE_ASF)) {
7919                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7920
7921                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7922                 addr0_low = tr32(MAC_ADDR_0_LOW);
7923                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7924                 addr1_low = tr32(MAC_ADDR_1_LOW);
7925
7926                 /* Skip MAC addr 1 if ASF is using it. */
7927                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7928                     !(addr1_high == 0 && addr1_low == 0))
7929                         skip_mac_1 = 1;
7930         }
7931         spin_lock_bh(&tp->lock);
7932         __tg3_set_mac_addr(tp, skip_mac_1);
7933         spin_unlock_bh(&tp->lock);
7934
7935         return err;
7936 }
7937
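/* Program one ring control block in NIC SRAM: the 64-bit host DMA
 * address (high word first), the maxlen/flags word and, on pre-5705
 * parts, the NIC-side ring address.
 */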
7938 /* tp->lock is held. */
7939 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7940                            dma_addr_t mapping, u32 maxlen_flags,
7941                            u32 nic_addr)
7942 {
7943         tg3_write_mem(tp,
7944                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7945                       ((u64) mapping >> 32));
7946         tg3_write_mem(tp,
7947                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7948                       ((u64) mapping & 0xffffffff));
7949         tg3_write_mem(tp,
7950                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7951                        maxlen_flags);
7952
7953         if (!tg3_flag(tp, 5705_PLUS))
7954                 tg3_write_mem(tp,
7955                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7956                               nic_addr);
7957 }
7958
7959 static void __tg3_set_rx_mode(struct net_device *);
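/* Write the ethtool coalescing parameters to the host coalescing
 * engine.  Vector 0's registers are zeroed when TSS/RSS steer tx/rx
 * work away from it; the per-MSIX-vector register blocks start at the
 * *_VEC1 offsets and are spaced 0x18 bytes apart.
 */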
7960 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7961 {
7962         int i;
7963
7964         if (!tg3_flag(tp, ENABLE_TSS)) {
7965                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7966                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7967                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7968         } else {
7969                 tw32(HOSTCC_TXCOL_TICKS, 0);
7970                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7971                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7972         }
7973
7974         if (!tg3_flag(tp, ENABLE_RSS)) {
7975                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7976                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7977                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7978         } else {
7979                 tw32(HOSTCC_RXCOL_TICKS, 0);
7980                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7981                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7982         }
7983
7984         if (!tg3_flag(tp, 5705_PLUS)) {
7985                 u32 val = ec->stats_block_coalesce_usecs;
7986
7987                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7988                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7989
7990                 if (!netif_carrier_ok(tp->dev))
7991                         val = 0;
7992
7993                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7994         }
7995
7996         for (i = 0; i < tp->irq_cnt - 1; i++) {
7997                 u32 reg;
7998
7999                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8000                 tw32(reg, ec->rx_coalesce_usecs);
8001                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8002                 tw32(reg, ec->rx_max_coalesced_frames);
8003                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8004                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8005
8006                 if (tg3_flag(tp, ENABLE_TSS)) {
8007                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8008                         tw32(reg, ec->tx_coalesce_usecs);
8009                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8010                         tw32(reg, ec->tx_max_coalesced_frames);
8011                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8012                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8013                 }
8014         }
8015
8016         for (; i < tp->irq_max - 1; i++) {
8017                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8018                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8019                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8020
8021                 if (tg3_flag(tp, ENABLE_TSS)) {
8022                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8023                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8024                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8025                 }
8026         }
8027 }
8028
8029 /* tp->lock is held. */
8030 static void tg3_rings_reset(struct tg3 *tp)
8031 {
8032         int i;
8033         u32 stblk, txrcb, rxrcb, limit;
8034         struct tg3_napi *tnapi = &tp->napi[0];
8035
8036         /* Disable all transmit rings but the first. */
8037         if (!tg3_flag(tp, 5705_PLUS))
8038                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8039         else if (tg3_flag(tp, 5717_PLUS))
8040                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8041         else if (tg3_flag(tp, 57765_CLASS))
8042                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8043         else
8044                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8045
8046         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8047              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8048                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8049                               BDINFO_FLAGS_DISABLED);
8050
8051
8052         /* Disable all receive return rings but the first. */
8053         if (tg3_flag(tp, 5717_PLUS))
8054                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8055         else if (!tg3_flag(tp, 5705_PLUS))
8056                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8057         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8058                  tg3_flag(tp, 57765_CLASS))
8059                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8060         else
8061                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8062
8063         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8064              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8065                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8066                               BDINFO_FLAGS_DISABLED);
8067
8068         /* Disable interrupts */
8069         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8070         tp->napi[0].chk_msi_cnt = 0;
8071         tp->napi[0].last_rx_cons = 0;
8072         tp->napi[0].last_tx_cons = 0;
8073
8074         /* Zero mailbox registers. */
8075         if (tg3_flag(tp, SUPPORT_MSIX)) {
8076                 for (i = 1; i < tp->irq_max; i++) {
8077                         tp->napi[i].tx_prod = 0;
8078                         tp->napi[i].tx_cons = 0;
8079                         if (tg3_flag(tp, ENABLE_TSS))
8080                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8081                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8082                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8083                         tp->napi[i].chk_msi_cnt = 0;
8084                         tp->napi[i].last_rx_cons = 0;
8085                         tp->napi[i].last_tx_cons = 0;
8086                 }
8087                 if (!tg3_flag(tp, ENABLE_TSS))
8088                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8089         } else {
8090                 tp->napi[0].tx_prod = 0;
8091                 tp->napi[0].tx_cons = 0;
8092                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8093                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8094         }
8095
8096         /* Make sure the NIC-based send BD rings are disabled. */
8097         if (!tg3_flag(tp, 5705_PLUS)) {
8098                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8099                 for (i = 0; i < 16; i++)
8100                         tw32_tx_mbox(mbox + i * 8, 0);
8101         }
8102
8103         txrcb = NIC_SRAM_SEND_RCB;
8104         rxrcb = NIC_SRAM_RCV_RET_RCB;
8105
8106         /* Clear status block in ram. */
8107         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8108
8109         /* Set status block DMA address */
8110         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8111              ((u64) tnapi->status_mapping >> 32));
8112         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8113              ((u64) tnapi->status_mapping & 0xffffffff));
8114
8115         if (tnapi->tx_ring) {
8116                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8117                                (TG3_TX_RING_SIZE <<
8118                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8119                                NIC_SRAM_TX_BUFFER_DESC);
8120                 txrcb += TG3_BDINFO_SIZE;
8121         }
8122
8123         if (tnapi->rx_rcb) {
8124                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8125                                (tp->rx_ret_ring_mask + 1) <<
8126                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8127                 rxrcb += TG3_BDINFO_SIZE;
8128         }
8129
8130         stblk = HOSTCC_STATBLCK_RING1;
8131
8132         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8133                 u64 mapping = (u64)tnapi->status_mapping;
8134                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8135                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8136
8137                 /* Clear status block in ram. */
8138                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8139
8140                 if (tnapi->tx_ring) {
8141                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8142                                        (TG3_TX_RING_SIZE <<
8143                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8144                                        NIC_SRAM_TX_BUFFER_DESC);
8145                         txrcb += TG3_BDINFO_SIZE;
8146                 }
8147
8148                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8149                                ((tp->rx_ret_ring_mask + 1) <<
8150                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8151
8152                 stblk += 8;
8153                 rxrcb += TG3_BDINFO_SIZE;
8154         }
8155 }
8156
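/* Pick the rx buffer-descriptor replenish thresholds: the smaller of
 * half the on-chip BD cache (capped by rx_std_max_post for the std
 * ring) and one eighth of the configured ring size, never below one.
 * 57765+ parts also get their replenish low-water marks set to the
 * full cache size.
 */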
8157 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8158 {
8159         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8160
8161         if (!tg3_flag(tp, 5750_PLUS) ||
8162             tg3_flag(tp, 5780_CLASS) ||
8163             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8164             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8165             tg3_flag(tp, 57765_PLUS))
8166                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8167         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8168                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8169                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8170         else
8171                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8172
8173         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8174         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8175
8176         val = min(nic_rep_thresh, host_rep_thresh);
8177         tw32(RCVBDI_STD_THRESH, val);
8178
8179         if (tg3_flag(tp, 57765_PLUS))
8180                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8181
8182         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8183                 return;
8184
8185         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8186
8187         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8188
8189         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8190         tw32(RCVBDI_JUMBO_THRESH, val);
8191
8192         if (tg3_flag(tp, 57765_PLUS))
8193                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8194 }
8195
8196 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8197 {
8198         int i;
8199
8200         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8201                 tp->rss_ind_tbl[i] =
8202                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8203 }
8204
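/* Sanity-check the RSS indirection table against the current vector
 * count.  With at most one rx vector every entry must be zero; any
 * entry that points at a nonexistent rx ring forces the default table
 * to be regenerated.
 */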
8205 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8206 {
8207         int i;
8208
8209         if (!tg3_flag(tp, SUPPORT_MSIX))
8210                 return;
8211
8212         if (tp->irq_cnt <= 2) {
8213                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8214                 return;
8215         }
8216
8217         /* Validate table against current IRQ count */
8218         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8219                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8220                         break;
8221         }
8222
8223         if (i != TG3_RSS_INDIR_TBL_SIZE)
8224                 tg3_rss_init_dflt_indir_tbl(tp);
8225 }
8226
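/* The hardware packs the indirection table eight 4-bit entries per
 * 32-bit register, most significant nibble first, e.g. entries 0-7
 * land in MAC_RSS_INDIR_TBL_0 as
 *
 *   (e0 << 28) | (e1 << 24) | ... | (e7 << 0)
 */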
8227 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8228 {
8229         int i = 0;
8230         u32 reg = MAC_RSS_INDIR_TBL_0;
8231
8232         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8233                 u32 val = tp->rss_ind_tbl[i];
8234                 i++;
8235                 for (; i % 8; i++) {
8236                         val <<= 4;
8237                         val |= tp->rss_ind_tbl[i];
8238                 }
8239                 tw32(reg, val);
8240                 reg += 4;
8241         }
8242 }
8243
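/* Bring the chip from reset to a fully operational state: quiesce and
 * reset the core, re-apply the per-ASIC PCIe/CPMU workarounds, rebuild
 * the rings, and reprogram the DMA, GRC and buffer manager settings
 * from scratch.
 */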
8244 /* tp->lock is held. */
8245 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8246 {
8247         u32 val, rdmac_mode;
8248         int i, err, limit;
8249         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8250
8251         tg3_disable_ints(tp);
8252
8253         tg3_stop_fw(tp);
8254
8255         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8256
8257         if (tg3_flag(tp, INIT_COMPLETE))
8258                 tg3_abort_hw(tp, 1);
8259
8260         /* Enable MAC control of LPI (EEE low-power idle) */
8261         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8262                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8263                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8264                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8265
8266                 tw32_f(TG3_CPMU_EEE_CTRL,
8267                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8268
8269                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8270                       TG3_CPMU_EEEMD_LPI_IN_TX |
8271                       TG3_CPMU_EEEMD_LPI_IN_RX |
8272                       TG3_CPMU_EEEMD_EEE_ENABLE;
8273
8274                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8275                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8276
8277                 if (tg3_flag(tp, ENABLE_APE))
8278                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8279
8280                 tw32_f(TG3_CPMU_EEE_MODE, val);
8281
8282                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8283                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8284                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8285
8286                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8287                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8288                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8289         }
8290
8291         if (reset_phy)
8292                 tg3_phy_reset(tp);
8293
8294         err = tg3_chip_reset(tp);
8295         if (err)
8296                 return err;
8297
8298         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8299
8300         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8301                 val = tr32(TG3_CPMU_CTRL);
8302                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8303                 tw32(TG3_CPMU_CTRL, val);
8304
8305                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8306                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8307                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8308                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8309
8310                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8311                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8312                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8313                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8314
8315                 val = tr32(TG3_CPMU_HST_ACC);
8316                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8317                 val |= CPMU_HST_ACC_MACCLK_6_25;
8318                 tw32(TG3_CPMU_HST_ACC, val);
8319         }
8320
8321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8322                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8323                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8324                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8325                 tw32(PCIE_PWR_MGMT_THRESH, val);
8326
8327                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8328                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8329
8330                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8331
8332                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8333                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8334         }
8335
8336         if (tg3_flag(tp, L1PLLPD_EN)) {
8337                 u32 grc_mode = tr32(GRC_MODE);
8338
8339                 /* Access the lower 1K of PL PCIE block registers. */
8340                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8341                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8342
8343                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8344                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8345                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8346
8347                 tw32(GRC_MODE, grc_mode);
8348         }
8349
8350         if (tg3_flag(tp, 57765_CLASS)) {
8351                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8352                         u32 grc_mode = tr32(GRC_MODE);
8353
8354                         /* Access the lower 1K of PL PCIE block registers. */
8355                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8356                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8357
8358                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8359                                    TG3_PCIE_PL_LO_PHYCTL5);
8360                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8361                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8362
8363                         tw32(GRC_MODE, grc_mode);
8364                 }
8365
8366                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8367                         u32 grc_mode = tr32(GRC_MODE);
8368
8369                         /* Access the lower 1K of DL PCIE block registers. */
8370                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8371                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8372
8373                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8374                                    TG3_PCIE_DL_LO_FTSMAX);
8375                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8376                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8377                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8378
8379                         tw32(GRC_MODE, grc_mode);
8380                 }
8381
8382                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8383                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8384                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8385                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8386         }
8387
8388         /* This works around an issue with Athlon chipsets on
8389          * B3 tigon3 silicon.  This bit has no effect on any
8390          * other revision.  But do not set this on PCI Express
8391          * chips and don't even touch the clocks if the CPMU is present.
8392          */
8393         if (!tg3_flag(tp, CPMU_PRESENT)) {
8394                 if (!tg3_flag(tp, PCI_EXPRESS))
8395                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8396                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8397         }
8398
8399         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8400             tg3_flag(tp, PCIX_MODE)) {
8401                 val = tr32(TG3PCI_PCISTATE);
8402                 val |= PCISTATE_RETRY_SAME_DMA;
8403                 tw32(TG3PCI_PCISTATE, val);
8404         }
8405
8406         if (tg3_flag(tp, ENABLE_APE)) {
8407                 /* Allow reads and writes to the
8408                  * APE register and memory space.
8409                  */
8410                 val = tr32(TG3PCI_PCISTATE);
8411                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8412                        PCISTATE_ALLOW_APE_SHMEM_WR |
8413                        PCISTATE_ALLOW_APE_PSPACE_WR;
8414                 tw32(TG3PCI_PCISTATE, val);
8415         }
8416
8417         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8418                 /* Enable some hw fixes.  */
8419                 val = tr32(TG3PCI_MSI_DATA);
8420                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8421                 tw32(TG3PCI_MSI_DATA, val);
8422         }
8423
8424         /* Descriptor ring init may make accesses to the
8425          * NIC SRAM area to set up the TX descriptors, so we
8426          * can only do this after the hardware has been
8427          * successfully reset.
8428          */
8429         err = tg3_init_rings(tp);
8430         if (err)
8431                 return err;
8432
8433         if (tg3_flag(tp, 57765_PLUS)) {
8434                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8435                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8436                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8437                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8438                 if (!tg3_flag(tp, 57765_CLASS) &&
8439                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8440                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8441                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8442         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8443                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8444                 /* This value is determined during the probe-time DMA
8445                  * engine test, tg3_test_dma.
8446                  */
8447                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8448         }
8449
8450         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8451                           GRC_MODE_4X_NIC_SEND_RINGS |
8452                           GRC_MODE_NO_TX_PHDR_CSUM |
8453                           GRC_MODE_NO_RX_PHDR_CSUM);
8454         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8455
8456         /* Pseudo-header checksum is done by hardware logic and not
8457          * the offload processors, so make the chip do the pseudo-
8458          * header checksums on receive.  For transmit it is more
8459          * convenient to do the pseudo-header checksum in software
8460          * as Linux does that on transmit for us in all cases.
8461          */
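             /* Concretely (a sketch of what the pseudo-header covers): the
              * TCP/UDP pseudo-header sum spans the source and destination IP
              * addresses, the protocol number, and the segment length.  On
              * transmit the stack folds it in for us (e.g. via
              * csum_tcpudp_magic()), which is why only the receive side is
              * offloaded here.
              */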
8462         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8463
8464         tw32(GRC_MODE,
8465              tp->grc_mode |
8466              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8467
8468         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8469         val = tr32(GRC_MISC_CFG);
8470         val &= ~0xff;
8471         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8472         tw32(GRC_MISC_CFG, val);
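             /* A prescaler value of 65 presumably divides the 66 MHz clock by
              * 65 + 1, giving the timer a 1 MHz (1 usec) tick.
              */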
8473
8474         /* Initialize MBUF/DESC pool. */
8475         if (tg3_flag(tp, 5750_PLUS)) {
8476                 /* Do nothing.  */
8477         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8478                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8479                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8480                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8481                 else
8482                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8483                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8484                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8485         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8486                 int fw_len;
8487
8488                 fw_len = tp->fw_len;
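                     /* Round the firmware length up to the next 128-byte
                      * boundary so the mbuf pool placed after it below stays
                      * aligned.
                      */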
8489                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8490                 tw32(BUFMGR_MB_POOL_ADDR,
8491                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8492                 tw32(BUFMGR_MB_POOL_SIZE,
8493                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8494         }
8495
8496         if (tp->dev->mtu <= ETH_DATA_LEN) {
8497                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8498                      tp->bufmgr_config.mbuf_read_dma_low_water);
8499                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8500                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8501                 tw32(BUFMGR_MB_HIGH_WATER,
8502                      tp->bufmgr_config.mbuf_high_water);
8503         } else {
8504                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8505                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8506                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8507                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8508                 tw32(BUFMGR_MB_HIGH_WATER,
8509                      tp->bufmgr_config.mbuf_high_water_jumbo);
8510         }
8511         tw32(BUFMGR_DMA_LOW_WATER,
8512              tp->bufmgr_config.dma_low_water);
8513         tw32(BUFMGR_DMA_HIGH_WATER,
8514              tp->bufmgr_config.dma_high_water);
8515
8516         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8517         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8518                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8520             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8521             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8522                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8523         tw32(BUFMGR_MODE, val);
8524         for (i = 0; i < 2000; i++) {
8525                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8526                         break;
8527                 udelay(10);
8528         }
8529         if (i >= 2000) {
8530                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8531                 return -ENODEV;
8532         }
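             /* The poll above gives the buffer manager up to
              * 2000 * 10 usec = ~20 msec to come up before we give up
              * on the device.
              */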
8533
8534         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8535                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8536
8537         tg3_setup_rxbd_thresholds(tp);
8538
8539         /* Initialize TG3_BDINFO's at:
8540          *  RCVDBDI_STD_BD:     standard eth size rx ring
8541          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8542          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8543          *
8544          * like so:
8545          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8546          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8547          *                              ring attribute flags
8548          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8549          *
8550          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8551          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8552          *
8553          * The size of each ring is fixed in the firmware, but the location is
8554          * configurable.
8555          */
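             /* As a sketch of the MAXLEN_FLAGS encoding described above: a
              * ring of 1536-byte buffers with no special attributes would be
              * written as (1536 << BDINFO_FLAGS_MAXLEN_SHIFT) | 0, the shift
              * being the 16 bits noted in the layout comment.
              */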
8556         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8557              ((u64) tpr->rx_std_mapping >> 32));
8558         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8559              ((u64) tpr->rx_std_mapping & 0xffffffff));
8560         if (!tg3_flag(tp, 5717_PLUS))
8561                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8562                      NIC_SRAM_RX_BUFFER_DESC);
8563
8564         /* Disable the mini ring */
8565         if (!tg3_flag(tp, 5705_PLUS))
8566                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8567                      BDINFO_FLAGS_DISABLED);
8568
8569         /* Program the jumbo buffer descriptor ring control
8570          * blocks on those devices that have them.
8571          */
8572         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8573             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8574
8575                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8576                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8577                              ((u64) tpr->rx_jmb_mapping >> 32));
8578                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8579                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8580                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8581                               BDINFO_FLAGS_MAXLEN_SHIFT;
8582                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8583                              val | BDINFO_FLAGS_USE_EXT_RECV);
8584                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8585                             tg3_flag(tp, 57765_CLASS))
8586                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8587                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8588                 } else {
8589                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8590                              BDINFO_FLAGS_DISABLED);
8591                 }
8592
8593                 if (tg3_flag(tp, 57765_PLUS)) {
8594                         val = TG3_RX_STD_RING_SIZE(tp);
8595                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8596                         val |= (TG3_RX_STD_DMA_SZ << 2);
8597                 } else
8598                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8599         } else
8600                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8601
8602         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8603
8604         tpr->rx_std_prod_idx = tp->rx_pending;
8605         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8606
8607         tpr->rx_jmb_prod_idx =
8608                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8609         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8610
8611         tg3_rings_reset(tp);
8612
8613         /* Initialize MAC address and backoff seed. */
8614         __tg3_set_mac_addr(tp, 0);
8615
8616         /* MTU + ethernet header + FCS + optional VLAN tag */
8617         tw32(MAC_RX_MTU_SIZE,
8618              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
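             /* For the default 1500-byte MTU this is 1500 + 14 + 4 + 4 = 1522. */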
8619
8620         /* The slot time is changed by tg3_setup_phy if we
8621          * run at gigabit with half duplex.
8622          */
8623         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8624               (6 << TX_LENGTHS_IPG_SHIFT) |
8625               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8626
8627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8628                 val |= tr32(MAC_TX_LENGTHS) &
8629                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8630                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8631
8632         tw32(MAC_TX_LENGTHS, val);
8633
8634         /* Receive rules. */
8635         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8636         tw32(RCVLPC_CONFIG, 0x0181);
8637
8638         /* Calculate RDMAC_MODE setting early, we need it to determine
8639          * the RCVLPC_STATE_ENABLE mask.
8640          */
8641         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8642                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8643                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8644                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8645                       RDMAC_MODE_LNGREAD_ENAB);
8646
8647         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8648                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8649
8650         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8651             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8653                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8654                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8655                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8656
8657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8658             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8659                 if (tg3_flag(tp, TSO_CAPABLE) &&
8660                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8661                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8662                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8663                            !tg3_flag(tp, IS_5788)) {
8664                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8665                 }
8666         }
8667
8668         if (tg3_flag(tp, PCI_EXPRESS))
8669                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8670
8671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8672                 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8673
8674         if (tg3_flag(tp, HW_TSO_1) ||
8675             tg3_flag(tp, HW_TSO_2) ||
8676             tg3_flag(tp, HW_TSO_3))
8677                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8678
8679         if (tg3_flag(tp, 57765_PLUS) ||
8680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8682                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8683
8684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8685                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8686
8687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8690             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8691             tg3_flag(tp, 57765_PLUS)) {
8692                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8693                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8694                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8695                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8696                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8697                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8698                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8699                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8700                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8701                 }
8702                 tw32(TG3_RDMA_RSRVCTRL_REG,
8703                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8704         }
8705
8706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8708                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8709                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8710                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8711                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8712         }
8713
8714         /* Receive/send statistics. */
8715         if (tg3_flag(tp, 5750_PLUS)) {
8716                 val = tr32(RCVLPC_STATS_ENABLE);
8717                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8718                 tw32(RCVLPC_STATS_ENABLE, val);
8719         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8720                    tg3_flag(tp, TSO_CAPABLE)) {
8721                 val = tr32(RCVLPC_STATS_ENABLE);
8722                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8723                 tw32(RCVLPC_STATS_ENABLE, val);
8724         } else {
8725                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8726         }
8727         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8728         tw32(SNDDATAI_STATSENAB, 0xffffff);
8729         tw32(SNDDATAI_STATSCTRL,
8730              (SNDDATAI_SCTRL_ENABLE |
8731               SNDDATAI_SCTRL_FASTUPD));
8732
8733         /* Set up the host coalescing engine. */
8734         tw32(HOSTCC_MODE, 0);
8735         for (i = 0; i < 2000; i++) {
8736                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8737                         break;
8738                 udelay(10);
8739         }
8740
8741         __tg3_set_coalesce(tp, &tp->coal);
8742
8743         if (!tg3_flag(tp, 5705_PLUS)) {
8744                 /* Status/statistics block address.  See tg3_timer,
8745                  * the tg3_periodic_fetch_stats call there, and
8746                  * tg3_get_stats to see how this works for 5705/5750 chips.
8747                  */
8748                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8749                      ((u64) tp->stats_mapping >> 32));
8750                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8751                      ((u64) tp->stats_mapping & 0xffffffff));
8752                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8753
8754                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8755
8756                 /* Clear statistics and status block memory areas */
8757                 for (i = NIC_SRAM_STATS_BLK;
8758                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8759                      i += sizeof(u32)) {
8760                         tg3_write_mem(tp, i, 0);
8761                         udelay(40);
8762                 }
8763         }
8764
8765         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8766
8767         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8768         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8769         if (!tg3_flag(tp, 5705_PLUS))
8770                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8771
8772         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8773                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8774                 /* reset to prevent losing 1st rx packet intermittently */
8775                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8776                 udelay(10);
8777         }
8778
8779         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8780                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8781                         MAC_MODE_FHDE_ENABLE;
8782         if (tg3_flag(tp, ENABLE_APE))
8783                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8784         if (!tg3_flag(tp, 5705_PLUS) &&
8785             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8786             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8787                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8788         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8789         udelay(40);
8790
8791         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8792          * If TG3_FLAG_IS_NIC is zero, we should read the
8793          * register to preserve the GPIO settings for LOMs. The GPIOs,
8794          * whether used as inputs or outputs, are set by boot code after
8795          * reset.
8796          */
8797         if (!tg3_flag(tp, IS_NIC)) {
8798                 u32 gpio_mask;
8799
8800                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8801                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8802                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8803
8804                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8805                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8806                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8807
8808                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8809                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8810
8811                 tp->grc_local_ctrl &= ~gpio_mask;
8812                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8813
8814                 /* GPIO1 must be driven high for eeprom write protect */
8815                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8816                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8817                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8818         }
8819         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8820         udelay(100);
8821
8822         if (tg3_flag(tp, USING_MSIX)) {
8823                 val = tr32(MSGINT_MODE);
8824                 val |= MSGINT_MODE_ENABLE;
8825                 if (tp->irq_cnt > 1)
8826                         val |= MSGINT_MODE_MULTIVEC_EN;
8827                 if (!tg3_flag(tp, 1SHOT_MSI))
8828                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8829                 tw32(MSGINT_MODE, val);
8830         }
8831
8832         if (!tg3_flag(tp, 5705_PLUS)) {
8833                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8834                 udelay(40);
8835         }
8836
8837         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8838                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8839                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8840                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8841                WDMAC_MODE_LNGREAD_ENAB);
8842
8843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8844             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8845                 if (tg3_flag(tp, TSO_CAPABLE) &&
8846                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8847                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8848                         /* nothing */
8849                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8850                            !tg3_flag(tp, IS_5788)) {
8851                         val |= WDMAC_MODE_RX_ACCEL;
8852                 }
8853         }
8854
8855         /* Enable host coalescing bug fix */
8856         if (tg3_flag(tp, 5755_PLUS))
8857                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8858
8859         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8860                 val |= WDMAC_MODE_BURST_ALL_DATA;
8861
8862         tw32_f(WDMAC_MODE, val);
8863         udelay(40);
8864
8865         if (tg3_flag(tp, PCIX_MODE)) {
8866                 u16 pcix_cmd;
8867
8868                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8869                                      &pcix_cmd);
8870                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8871                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8872                         pcix_cmd |= PCI_X_CMD_READ_2K;
8873                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8874                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8875                         pcix_cmd |= PCI_X_CMD_READ_2K;
8876                 }
8877                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8878                                       pcix_cmd);
8879         }
8880
8881         tw32_f(RDMAC_MODE, rdmac_mode);
8882         udelay(40);
8883
8884         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8885         if (!tg3_flag(tp, 5705_PLUS))
8886                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8887
8888         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8889                 tw32(SNDDATAC_MODE,
8890                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8891         else
8892                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8893
8894         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8895         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8896         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8897         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8898                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8899         tw32(RCVDBDI_MODE, val);
8900         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8901         if (tg3_flag(tp, HW_TSO_1) ||
8902             tg3_flag(tp, HW_TSO_2) ||
8903             tg3_flag(tp, HW_TSO_3))
8904                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8905         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8906         if (tg3_flag(tp, ENABLE_TSS))
8907                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8908         tw32(SNDBDI_MODE, val);
8909         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8910
8911         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8912                 err = tg3_load_5701_a0_firmware_fix(tp);
8913                 if (err)
8914                         return err;
8915         }
8916
8917         if (tg3_flag(tp, TSO_CAPABLE)) {
8918                 err = tg3_load_tso_firmware(tp);
8919                 if (err)
8920                         return err;
8921         }
8922
8923         tp->tx_mode = TX_MODE_ENABLE;
8924
8925         if (tg3_flag(tp, 5755_PLUS) ||
8926             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8927                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8928
8929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8930                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8931                 tp->tx_mode &= ~val;
8932                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8933         }
8934
8935         tw32_f(MAC_TX_MODE, tp->tx_mode);
8936         udelay(100);
8937
8938         if (tg3_flag(tp, ENABLE_RSS)) {
8939                 tg3_rss_write_indir_tbl(tp);
8940
8941                 /* Set up the "secret" hash key. */
8942                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8943                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8944                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8945                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8946                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8947                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8948                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8949                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8950                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8951                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8952         }
8953
8954         tp->rx_mode = RX_MODE_ENABLE;
8955         if (tg3_flag(tp, 5755_PLUS))
8956                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8957
8958         if (tg3_flag(tp, ENABLE_RSS))
8959                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8960                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8961                                RX_MODE_RSS_IPV6_HASH_EN |
8962                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8963                                RX_MODE_RSS_IPV4_HASH_EN |
8964                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8965
8966         tw32_f(MAC_RX_MODE, tp->rx_mode);
8967         udelay(10);
8968
8969         tw32(MAC_LED_CTRL, tp->led_ctrl);
8970
8971         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8972         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8973                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8974                 udelay(10);
8975         }
8976         tw32_f(MAC_RX_MODE, tp->rx_mode);
8977         udelay(10);
8978
8979         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8980                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8981                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8982                         /* Set drive transmission level to 1.2V  */
8983                         /* only if the signal pre-emphasis bit is not set  */
8984                         val = tr32(MAC_SERDES_CFG);
8985                         val &= 0xfffff000;
8986                         val |= 0x880;
8987                         tw32(MAC_SERDES_CFG, val);
8988                 }
8989                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8990                         tw32(MAC_SERDES_CFG, 0x616000);
8991         }
8992
8993         /* Prevent chip from dropping frames when flow control
8994          * is enabled.
8995          */
8996         if (tg3_flag(tp, 57765_CLASS))
8997                 val = 1;
8998         else
8999                 val = 2;
9000         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9001
9002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9003             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9004                 /* Use hardware link auto-negotiation */
9005                 tg3_flag_set(tp, HW_AUTONEG);
9006         }
9007
9008         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9010                 u32 tmp;
9011
9012                 tmp = tr32(SERDES_RX_CTRL);
9013                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9014                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9015                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9016                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9017         }
9018
9019         if (!tg3_flag(tp, USE_PHYLIB)) {
9020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9021                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9022                         tp->link_config.speed = tp->link_config.orig_speed;
9023                         tp->link_config.duplex = tp->link_config.orig_duplex;
9024                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9025                 }
9026
9027                 err = tg3_setup_phy(tp, 0);
9028                 if (err)
9029                         return err;
9030
9031                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9032                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9033                         u32 tmp;
9034
9035                         /* Clear CRC stats. */
9036                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9037                                 tg3_writephy(tp, MII_TG3_TEST1,
9038                                              tmp | MII_TG3_TEST1_CRC_EN);
9039                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9040                         }
9041                 }
9042         }
9043
9044         __tg3_set_rx_mode(tp->dev);
9045
9046         /* Initialize receive rules. */
9047         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9048         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9049         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9050         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9051
9052         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9053                 limit = 8;
9054         else
9055                 limit = 16;
9056         if (tg3_flag(tp, ENABLE_ASF))
9057                 limit -= 4;
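             /* Deliberate fallthrough below: starting at the case matching
              * limit, every rule from limit - 1 down to 4 is cleared.  Rules
              * 0 and 1 were programmed above; rules 2 and 3 are left alone.
              */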
9058         switch (limit) {
9059         case 16:
9060                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9061         case 15:
9062                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9063         case 14:
9064                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9065         case 13:
9066                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9067         case 12:
9068                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9069         case 11:
9070                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9071         case 10:
9072                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9073         case 9:
9074                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9075         case 8:
9076                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9077         case 7:
9078                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9079         case 6:
9080                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9081         case 5:
9082                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9083         case 4:
9084                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9085         case 3:
9086                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9087         case 2:
9088         case 1:
9089
9090         default:
9091                 break;
9092         }
9093
9094         if (tg3_flag(tp, ENABLE_APE))
9095                 /* Write our heartbeat update interval to APE. */
9096                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9097                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9098
9099         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9100
9101         return 0;
9102 }
9103
9104 /* Called at device open time to get the chip ready for
9105  * packet processing.  Invoked with tp->lock held.
9106  */
9107 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9108 {
9109         tg3_switch_clocks(tp);
9110
9111         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9112
9113         return tg3_reset_hw(tp, reset_phy);
9114 }
9115
9116 /* Restart hardware after configuration changes, self-test, etc.
9117  * Invoked with tp->lock held.
9118  */
9119 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9120         __releases(tp->lock)
9121         __acquires(tp->lock)
9122 {
9123         int err;
9124
9125         err = tg3_init_hw(tp, reset_phy);
9126         if (err) {
9127                 netdev_err(tp->dev,
9128                            "Failed to re-initialize device, aborting\n");
9129                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9130                 tg3_full_unlock(tp);
9131                 del_timer_sync(&tp->timer);
9132                 tp->irq_sync = 0;
9133                 tg3_napi_enable(tp);
9134                 dev_close(tp->dev);
9135                 tg3_full_lock(tp, 0);
9136         }
9137         return err;
9138 }
9139
9140 #define TG3_STAT_ADD32(PSTAT, REG) \
9141 do {    u32 __val = tr32(REG); \
9142         (PSTAT)->low += __val; \
9143         if ((PSTAT)->low < __val) \
9144                 (PSTAT)->high += 1; \
9145 } while (0)
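     /* Carry detection sketch: unsigned 32-bit addition wraps, so after
      * low += __val the test (low < __val) is true exactly when the add
      * overflowed; e.g. 0xffffffff + 2 wraps to 1, and 1 < 2 carries one
      * into ->high.
      */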
9146
9147 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9148 {
9149         struct tg3_hw_stats *sp = tp->hw_stats;
9150
9151         if (!netif_carrier_ok(tp->dev))
9152                 return;
9153
9154         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9155         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9156         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9157         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9158         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9159         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9160         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9161         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9162         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9163         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9164         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9165         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9166         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9167
9168         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9169         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9170         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9171         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9172         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9173         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9174         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9175         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9176         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9177         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9178         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9179         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9180         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9181         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9182
9183         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9184         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9185             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9186             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9187                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9188         } else {
9189                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9190                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9191                 if (val) {
9192                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9193                         sp->rx_discards.low += val;
9194                         if (sp->rx_discards.low < val)
9195                                 sp->rx_discards.high += 1;
9196                 }
9197                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9198         }
9199         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9200 }
9201
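     /* Work around chips that can drop an MSI: if a NAPI context has work
      * pending but its consumer indices have not advanced since the last
      * timer tick, allow one tick of grace and then invoke the MSI handler
      * directly.
      */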
9202 static void tg3_chk_missed_msi(struct tg3 *tp)
9203 {
9204         u32 i;
9205
9206         for (i = 0; i < tp->irq_cnt; i++) {
9207                 struct tg3_napi *tnapi = &tp->napi[i];
9208
9209                 if (tg3_has_work(tnapi)) {
9210                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9211                             tnapi->last_tx_cons == tnapi->tx_cons) {
9212                                 if (tnapi->chk_msi_cnt < 1) {
9213                                         tnapi->chk_msi_cnt++;
9214                                         return;
9215                                 }
9216                                 tg3_msi(0, tnapi);
9217                         }
9218                 }
9219                 tnapi->chk_msi_cnt = 0;
9220                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9221                 tnapi->last_tx_cons = tnapi->tx_cons;
9222         }
9223 }
9224
9225 static void tg3_timer(unsigned long __opaque)
9226 {
9227         struct tg3 *tp = (struct tg3 *) __opaque;
9228
9229         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9230                 goto restart_timer;
9231
9232         spin_lock(&tp->lock);
9233
9234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9235             tg3_flag(tp, 57765_CLASS))
9236                 tg3_chk_missed_msi(tp);
9237
9238         if (!tg3_flag(tp, TAGGED_STATUS)) {
9239                 /* All of this garbage is because, when using non-tagged
9240                  * IRQ status, the mailbox/status_block protocol the chip
9241                  * uses to talk to the CPU is race prone.
9242                  */
9243                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9244                         tw32(GRC_LOCAL_CTRL,
9245                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9246                 } else {
9247                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9248                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9249                 }
9250
9251                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9252                         spin_unlock(&tp->lock);
9253                         tg3_reset_task_schedule(tp);
9254                         goto restart_timer;
9255                 }
9256         }
9257
9258         /* This part only runs once per second. */
9259         if (!--tp->timer_counter) {
9260                 if (tg3_flag(tp, 5705_PLUS))
9261                         tg3_periodic_fetch_stats(tp);
9262
9263                 if (tp->setlpicnt && !--tp->setlpicnt)
9264                         tg3_phy_eee_enable(tp);
9265
9266                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9267                         u32 mac_stat;
9268                         int phy_event;
9269
9270                         mac_stat = tr32(MAC_STATUS);
9271
9272                         phy_event = 0;
9273                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9274                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9275                                         phy_event = 1;
9276                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9277                                 phy_event = 1;
9278
9279                         if (phy_event)
9280                                 tg3_setup_phy(tp, 0);
9281                 } else if (tg3_flag(tp, POLL_SERDES)) {
9282                         u32 mac_stat = tr32(MAC_STATUS);
9283                         int need_setup = 0;
9284
9285                         if (netif_carrier_ok(tp->dev) &&
9286                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9287                                 need_setup = 1;
9288                         }
9289                         if (!netif_carrier_ok(tp->dev) &&
9290                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9291                                          MAC_STATUS_SIGNAL_DET))) {
9292                                 need_setup = 1;
9293                         }
9294                         if (need_setup) {
9295                                 if (!tp->serdes_counter) {
9296                                         tw32_f(MAC_MODE,
9297                                              (tp->mac_mode &
9298                                               ~MAC_MODE_PORT_MODE_MASK));
9299                                         udelay(40);
9300                                         tw32_f(MAC_MODE, tp->mac_mode);
9301                                         udelay(40);
9302                                 }
9303                                 tg3_setup_phy(tp, 0);
9304                         }
9305                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9306                            tg3_flag(tp, 5780_CLASS)) {
9307                         tg3_serdes_parallel_detect(tp);
9308                 }
9309
9310                 tp->timer_counter = tp->timer_multiplier;
9311         }
9312
9313         /* Heartbeat is only sent once every 2 seconds.
9314          *
9315          * The heartbeat is to tell the ASF firmware that the host
9316          * driver is still alive.  In the event that the OS crashes,
9317          * ASF needs to reset the hardware to free up the FIFO space
9318          * that may be filled with rx packets destined for the host.
9319          * If the FIFO is full, ASF will no longer function properly.
9320          *
9321          * Unintended resets have been reported on real time kernels
9322          * where the timer doesn't run on time.  Netpoll will also have
9323          * the same problem.
9324          *
9325          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9326          * to check the ring condition when the heartbeat is expiring
9327          * before doing the reset.  This will prevent most unintended
9328          * resets.
9329          */
9330         if (!--tp->asf_counter) {
9331                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9332                         tg3_wait_for_event_ack(tp);
9333
9334                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9335                                       FWCMD_NICDRV_ALIVE3);
9336                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9337                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9338                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9339
9340                         tg3_generate_fw_event(tp);
9341                 }
9342                 tp->asf_counter = tp->asf_multiplier;
9343         }
9344
9345         spin_unlock(&tp->lock);
9346
9347 restart_timer:
9348         tp->timer.expires = jiffies + tp->timer_offset;
9349         add_timer(&tp->timer);
9350 }
9351
9352 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9353 {
9354         irq_handler_t fn;
9355         unsigned long flags;
9356         char *name;
9357         struct tg3_napi *tnapi = &tp->napi[irq_num];
9358
9359         if (tp->irq_cnt == 1)
9360                 name = tp->dev->name;
9361         else {
9362                 name = &tnapi->irq_lbl[0];
9363                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9364                 name[IFNAMSIZ-1] = 0;
9365         }
9366
9367         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9368                 fn = tg3_msi;
9369                 if (tg3_flag(tp, 1SHOT_MSI))
9370                         fn = tg3_msi_1shot;
9371                 flags = 0;
9372         } else {
9373                 fn = tg3_interrupt;
9374                 if (tg3_flag(tp, TAGGED_STATUS))
9375                         fn = tg3_interrupt_tagged;
9376                 flags = IRQF_SHARED;
9377         }
9378
9379         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9380 }
9381
9382 static int tg3_test_interrupt(struct tg3 *tp)
9383 {
9384         struct tg3_napi *tnapi = &tp->napi[0];
9385         struct net_device *dev = tp->dev;
9386         int err, i, intr_ok = 0;
9387         u32 val;
9388
9389         if (!netif_running(dev))
9390                 return -ENODEV;
9391
9392         tg3_disable_ints(tp);
9393
9394         free_irq(tnapi->irq_vec, tnapi);
9395
9396         /*
9397          * Turn off MSI one shot mode.  Otherwise this test has no
9398          * observable way to know whether the interrupt was delivered.
9399          */
9400         if (tg3_flag(tp, 57765_PLUS)) {
9401                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9402                 tw32(MSGINT_MODE, val);
9403         }
9404
9405         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9406                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9407         if (err)
9408                 return err;
9409
9410         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9411         tg3_enable_ints(tp);
9412
9413         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9414                tnapi->coal_now);
9415
9416         for (i = 0; i < 5; i++) {
9417                 u32 int_mbox, misc_host_ctrl;
9418
9419                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9420                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9421
9422                 if ((int_mbox != 0) ||
9423                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9424                         intr_ok = 1;
9425                         break;
9426                 }
9427
9428                 if (tg3_flag(tp, 57765_PLUS) &&
9429                     tnapi->hw_status->status_tag != tnapi->last_tag)
9430                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9431
9432                 msleep(10);
9433         }
9434
9435         tg3_disable_ints(tp);
9436
9437         free_irq(tnapi->irq_vec, tnapi);
9438
9439         err = tg3_request_irq(tp, 0);
9440
9441         if (err)
9442                 return err;
9443
9444         if (intr_ok) {
9445                 /* Reenable MSI one shot mode. */
9446                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9447                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9448                         tw32(MSGINT_MODE, val);
9449                 }
9450                 return 0;
9451         }
9452
9453         return -EIO;
9454 }
9455
9456 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9457  * is successfully restored.
9458  */
9459 static int tg3_test_msi(struct tg3 *tp)
9460 {
9461         int err;
9462         u16 pci_cmd;
9463
9464         if (!tg3_flag(tp, USING_MSI))
9465                 return 0;
9466
9467         /* Turn off SERR reporting in case MSI terminates with Master
9468          * Abort.
9469          */
9470         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9471         pci_write_config_word(tp->pdev, PCI_COMMAND,
9472                               pci_cmd & ~PCI_COMMAND_SERR);
9473
9474         err = tg3_test_interrupt(tp);
9475
9476         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9477
9478         if (!err)
9479                 return 0;
9480
9481         /* other failures */
9482         if (err != -EIO)
9483                 return err;
9484
9485         /* MSI test failed, go back to INTx mode */
9486         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9487                     "to INTx mode. Please report this failure to the PCI "
9488                     "maintainer and include system chipset information\n");
9489
9490         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9491
9492         pci_disable_msi(tp->pdev);
9493
9494         tg3_flag_clear(tp, USING_MSI);
9495         tp->napi[0].irq_vec = tp->pdev->irq;
9496
9497         err = tg3_request_irq(tp, 0);
9498         if (err)
9499                 return err;
9500
9501         /* Need to reset the chip because the MSI cycle may have terminated
9502          * with Master Abort.
9503          */
9504         tg3_full_lock(tp, 1);
9505
9506         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9507         err = tg3_init_hw(tp, 1);
9508
9509         tg3_full_unlock(tp);
9510
9511         if (err)
9512                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9513
9514         return err;
9515 }
9516
9517 static int tg3_request_firmware(struct tg3 *tp)
9518 {
9519         const __be32 *fw_data;
9520
9521         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9522                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9523                            tp->fw_needed);
9524                 return -ENOENT;
9525         }
9526
9527         fw_data = (void *)tp->fw->data;
9528
9529         /* Firmware blob starts with version numbers, followed by
9530          * start address and _full_ length including BSS sections
9531          * (which must be longer than the actual data, of course).
9532          */
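             /* Layout sketch, from how the words are consumed: fw_data[0] is
              * the version, fw_data[1] the start address, and fw_data[2] the
              * full length -- the three header words are the 12 bytes
              * subtracted from tp->fw->size below.
              */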
9533
9534         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9535         if (tp->fw_len < (tp->fw->size - 12)) {
9536                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9537                            tp->fw_len, tp->fw_needed);
9538                 release_firmware(tp->fw);
9539                 tp->fw = NULL;
9540                 return -EINVAL;
9541         }
9542
9543         /* We no longer need firmware; we have it. */
9544         tp->fw_needed = NULL;
9545         return 0;
9546 }
9547
9548 static bool tg3_enable_msix(struct tg3 *tp)
9549 {
9550         int i, rc;
9551         struct msix_entry msix_ent[tp->irq_max];
9552
9553         tp->irq_cnt = num_online_cpus();
9554         if (tp->irq_cnt > 1) {
9555                 /* We want as many rx rings enabled as there are cpus.
9556                  * In multiqueue MSI-X mode, the first MSI-X vector
9557                  * only deals with link interrupts, etc, so we add
9558                  * one to the number of vectors we are requesting.
9559                  */
9560                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9561         }
9562
9563         for (i = 0; i < tp->irq_max; i++) {
9564                 msix_ent[i].entry  = i;
9565                 msix_ent[i].vector = 0;
9566         }
9567
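             /* pci_enable_msix() returns a positive count when it can only
              * allocate fewer vectors than requested; in that case retry with
              * exactly that many before falling back to MSI/INTx.
              */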
9568         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9569         if (rc < 0) {
9570                 return false;
9571         } else if (rc != 0) {
9572                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9573                         return false;
9574                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9575                               tp->irq_cnt, rc);
9576                 tp->irq_cnt = rc;
9577         }
9578
9579         for (i = 0; i < tp->irq_max; i++)
9580                 tp->napi[i].irq_vec = msix_ent[i].vector;
9581
9582         netif_set_real_num_tx_queues(tp->dev, 1);
9583         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9584         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9585                 pci_disable_msix(tp->pdev);
9586                 return false;
9587         }
9588
9589         if (tp->irq_cnt > 1) {
9590                 tg3_flag_set(tp, ENABLE_RSS);
9591
9592                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9593                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9594                         tg3_flag_set(tp, ENABLE_TSS);
9595                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9596                 }
9597         }
9598
9599         return true;
9600 }
9601
9602 static void tg3_ints_init(struct tg3 *tp)
9603 {
9604         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9605             !tg3_flag(tp, TAGGED_STATUS)) {
9606                 /* All MSI-supporting chips should support tagged
9607                  * status.  Warn and fall back if that is not the case.
9608                  */
9609                 netdev_warn(tp->dev,
9610                             "MSI without TAGGED_STATUS? Not using MSI\n");
9611                 goto defcfg;
9612         }
9613
9614         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9615                 tg3_flag_set(tp, USING_MSIX);
9616         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9617                 tg3_flag_set(tp, USING_MSI);
9618
9619         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9620                 u32 msi_mode = tr32(MSGINT_MODE);
9621                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9622                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9623                 if (!tg3_flag(tp, 1SHOT_MSI))
9624                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9625                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9626         }
9627 defcfg:
9628         if (!tg3_flag(tp, USING_MSIX)) {
9629                 tp->irq_cnt = 1;
9630                 tp->napi[0].irq_vec = tp->pdev->irq;
9631                 netif_set_real_num_tx_queues(tp->dev, 1);
9632                 netif_set_real_num_rx_queues(tp->dev, 1);
9633         }
9634 }
9635
9636 static void tg3_ints_fini(struct tg3 *tp)
9637 {
9638         if (tg3_flag(tp, USING_MSIX))
9639                 pci_disable_msix(tp->pdev);
9640         else if (tg3_flag(tp, USING_MSI))
9641                 pci_disable_msi(tp->pdev);
9642         tg3_flag_clear(tp, USING_MSI);
9643         tg3_flag_clear(tp, USING_MSIX);
9644         tg3_flag_clear(tp, ENABLE_RSS);
9645         tg3_flag_clear(tp, ENABLE_TSS);
9646 }
9647
9648 static int tg3_open(struct net_device *dev)
9649 {
9650         struct tg3 *tp = netdev_priv(dev);
9651         int i, err;
9652
9653         if (tp->fw_needed) {
9654                 err = tg3_request_firmware(tp);
9655                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9656                         if (err)
9657                                 return err;
9658                 } else if (err) {
9659                         netdev_warn(tp->dev, "TSO capability disabled\n");
9660                         tg3_flag_clear(tp, TSO_CAPABLE);
9661                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9662                         netdev_notice(tp->dev, "TSO capability restored\n");
9663                         tg3_flag_set(tp, TSO_CAPABLE);
9664                 }
9665         }
9666
9667         netif_carrier_off(tp->dev);
9668
9669         err = tg3_power_up(tp);
9670         if (err)
9671                 return err;
9672
9673         tg3_full_lock(tp, 0);
9674
9675         tg3_disable_ints(tp);
9676         tg3_flag_clear(tp, INIT_COMPLETE);
9677
9678         tg3_full_unlock(tp);
9679
9680         /*
9681          * Set up interrupts first so we know how
9682          * many NAPI resources to allocate
9683          */
9684         tg3_ints_init(tp);
9685
9686         tg3_rss_check_indir_tbl(tp);
9687
9688         /* The placement of this call is tied
9689          * to the setup and use of Host TX descriptors.
9690          */
9691         err = tg3_alloc_consistent(tp);
9692         if (err)
9693                 goto err_out1;
9694
9695         tg3_napi_init(tp);
9696
9697         tg3_napi_enable(tp);
9698
9699         for (i = 0; i < tp->irq_cnt; i++) {
9700                 struct tg3_napi *tnapi = &tp->napi[i];
9701                 err = tg3_request_irq(tp, i);
9702                 if (err) {
9703                         for (i--; i >= 0; i--) {
9704                                 tnapi = &tp->napi[i];
9705                                 free_irq(tnapi->irq_vec, tnapi);
9706                         }
9707                         goto err_out2;
9708                 }
9709         }
9710
9711         tg3_full_lock(tp, 0);
9712
9713         err = tg3_init_hw(tp, 1);
9714         if (err) {
9715                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9716                 tg3_free_rings(tp);
9717         } else {
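                /* Chips with tagged status blocks (other than the 5717
                 * and the 57765 class) only need a 1 Hz service timer;
                 * everyone else is polled at 10 Hz.  timer_counter counts
                 * one second's worth of ticks, asf_counter two seconds'.
                 */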
9718                 if (tg3_flag(tp, TAGGED_STATUS) &&
9719                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9720                     !tg3_flag(tp, 57765_CLASS))
9721                         tp->timer_offset = HZ;
9722                 else
9723                         tp->timer_offset = HZ / 10;
9724
9725                 BUG_ON(tp->timer_offset > HZ);
9726                 tp->timer_counter = tp->timer_multiplier =
9727                         (HZ / tp->timer_offset);
9728                 tp->asf_counter = tp->asf_multiplier =
9729                         ((HZ / tp->timer_offset) * 2);
9730
9731                 init_timer(&tp->timer);
9732                 tp->timer.expires = jiffies + tp->timer_offset;
9733                 tp->timer.data = (unsigned long) tp;
9734                 tp->timer.function = tg3_timer;
9735         }
9736
9737         tg3_full_unlock(tp);
9738
9739         if (err)
9740                 goto err_out3;
9741
9742         if (tg3_flag(tp, USING_MSI)) {
9743                 err = tg3_test_msi(tp);
9744
9745                 if (err) {
9746                         tg3_full_lock(tp, 0);
9747                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9748                         tg3_free_rings(tp);
9749                         tg3_full_unlock(tp);
9750
9751                         goto err_out2;
9752                 }
9753
9754                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9755                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9756
9757                         tw32(PCIE_TRANSACTION_CFG,
9758                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9759                 }
9760         }
9761
9762         tg3_phy_start(tp);
9763
9764         tg3_full_lock(tp, 0);
9765
9766         add_timer(&tp->timer);
9767         tg3_flag_set(tp, INIT_COMPLETE);
9768         tg3_enable_ints(tp);
9769
9770         tg3_full_unlock(tp);
9771
9772         netif_tx_start_all_queues(dev);
9773
9774         /*
9775          * Reset the loopback feature if it was turned on while the device
9776          * was down; make sure it is now installed properly.
9777          */
9778         if (dev->features & NETIF_F_LOOPBACK)
9779                 tg3_set_loopback(dev, dev->features);
9780
9781         return 0;
9782
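/* Error unwinding mirrors the setup order above: free the IRQs, then the
 * NAPI contexts and DMA memory, then the interrupt mode, and finally put
 * the device back into D3hot, undoing tg3_power_up().
 */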
9783 err_out3:
9784         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9785                 struct tg3_napi *tnapi = &tp->napi[i];
9786                 free_irq(tnapi->irq_vec, tnapi);
9787         }
9788
9789 err_out2:
9790         tg3_napi_disable(tp);
9791         tg3_napi_fini(tp);
9792         tg3_free_consistent(tp);
9793
9794 err_out1:
9795         tg3_ints_fini(tp);
9796         tg3_frob_aux_power(tp, false);
9797         pci_set_power_state(tp->pdev, PCI_D3hot);
9798         return err;
9799 }
9800
9801 static int tg3_close(struct net_device *dev)
9802 {
9803         int i;
9804         struct tg3 *tp = netdev_priv(dev);
9805
9806         tg3_napi_disable(tp);
9807         tg3_reset_task_cancel(tp);
9808
9809         netif_tx_stop_all_queues(dev);
9810
9811         del_timer_sync(&tp->timer);
9812
9813         tg3_phy_stop(tp);
9814
9815         tg3_full_lock(tp, 1);
9816
9817         tg3_disable_ints(tp);
9818
9819         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9820         tg3_free_rings(tp);
9821         tg3_flag_clear(tp, INIT_COMPLETE);
9822
9823         tg3_full_unlock(tp);
9824
9825         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9826                 struct tg3_napi *tnapi = &tp->napi[i];
9827                 free_irq(tnapi->irq_vec, tnapi);
9828         }
9829
9830         tg3_ints_fini(tp);
9831
9832         /* Clear stats across close / open calls */
9833         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9834         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9835
9836         tg3_napi_fini(tp);
9837
9838         tg3_free_consistent(tp);
9839
9840         tg3_power_down(tp);
9841
9842         netif_carrier_off(tp->dev);
9843
9844         return 0;
9845 }
9846
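/* Hardware statistics counters are kept as two 32-bit halves; fold one
 * into a host u64.
 */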
9847 static inline u64 get_stat64(tg3_stat64_t *val)
9848 {
9849         return ((u64)val->high << 32) | ((u64)val->low);
9850 }
9851
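/* On 5700/5701 copper devices the CRC error count lives in a PHY test
 * register rather than in the MAC statistics block: enable the counter
 * through MII_TG3_TEST1, read it from MII_TG3_RXR_COUNTERS, and
 * accumulate, since the hardware counter is apparently cleared by the
 * read.
 */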
9852 static u64 calc_crc_errors(struct tg3 *tp)
9853 {
9854         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9855
9856         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9857             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9858              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9859                 u32 val;
9860
9861                 spin_lock_bh(&tp->lock);
9862                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9863                         tg3_writephy(tp, MII_TG3_TEST1,
9864                                      val | MII_TG3_TEST1_CRC_EN);
9865                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9866                 } else
9867                         val = 0;
9868                 spin_unlock_bh(&tp->lock);
9869
9870                 tp->phy_crc_errors += val;
9871
9872                 return tp->phy_crc_errors;
9873         }
9874
9875         return get_stat64(&hw_stats->rx_fcs_errors);
9876 }
9877
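/* Each ethtool stat is reported as the live hardware counter plus the
 * value saved in estats_prev; tg3_close() zeroes the saved copies, so
 * the totals restart across a close/open cycle.
 */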
9878 #define ESTAT_ADD(member) \
9879         estats->member =        old_estats->member + \
9880                                 get_stat64(&hw_stats->member)
9881
9882 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
9883                                                struct tg3_ethtool_stats *estats)
9884 {
9885         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9886         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9887
9888         if (!hw_stats)
9889                 return old_estats;
9890
9891         ESTAT_ADD(rx_octets);
9892         ESTAT_ADD(rx_fragments);
9893         ESTAT_ADD(rx_ucast_packets);
9894         ESTAT_ADD(rx_mcast_packets);
9895         ESTAT_ADD(rx_bcast_packets);
9896         ESTAT_ADD(rx_fcs_errors);
9897         ESTAT_ADD(rx_align_errors);
9898         ESTAT_ADD(rx_xon_pause_rcvd);
9899         ESTAT_ADD(rx_xoff_pause_rcvd);
9900         ESTAT_ADD(rx_mac_ctrl_rcvd);
9901         ESTAT_ADD(rx_xoff_entered);
9902         ESTAT_ADD(rx_frame_too_long_errors);
9903         ESTAT_ADD(rx_jabbers);
9904         ESTAT_ADD(rx_undersize_packets);
9905         ESTAT_ADD(rx_in_length_errors);
9906         ESTAT_ADD(rx_out_length_errors);
9907         ESTAT_ADD(rx_64_or_less_octet_packets);
9908         ESTAT_ADD(rx_65_to_127_octet_packets);
9909         ESTAT_ADD(rx_128_to_255_octet_packets);
9910         ESTAT_ADD(rx_256_to_511_octet_packets);
9911         ESTAT_ADD(rx_512_to_1023_octet_packets);
9912         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9913         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9914         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9915         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9916         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9917
9918         ESTAT_ADD(tx_octets);
9919         ESTAT_ADD(tx_collisions);
9920         ESTAT_ADD(tx_xon_sent);
9921         ESTAT_ADD(tx_xoff_sent);
9922         ESTAT_ADD(tx_flow_control);
9923         ESTAT_ADD(tx_mac_errors);
9924         ESTAT_ADD(tx_single_collisions);
9925         ESTAT_ADD(tx_mult_collisions);
9926         ESTAT_ADD(tx_deferred);
9927         ESTAT_ADD(tx_excessive_collisions);
9928         ESTAT_ADD(tx_late_collisions);
9929         ESTAT_ADD(tx_collide_2times);
9930         ESTAT_ADD(tx_collide_3times);
9931         ESTAT_ADD(tx_collide_4times);
9932         ESTAT_ADD(tx_collide_5times);
9933         ESTAT_ADD(tx_collide_6times);
9934         ESTAT_ADD(tx_collide_7times);
9935         ESTAT_ADD(tx_collide_8times);
9936         ESTAT_ADD(tx_collide_9times);
9937         ESTAT_ADD(tx_collide_10times);
9938         ESTAT_ADD(tx_collide_11times);
9939         ESTAT_ADD(tx_collide_12times);
9940         ESTAT_ADD(tx_collide_13times);
9941         ESTAT_ADD(tx_collide_14times);
9942         ESTAT_ADD(tx_collide_15times);
9943         ESTAT_ADD(tx_ucast_packets);
9944         ESTAT_ADD(tx_mcast_packets);
9945         ESTAT_ADD(tx_bcast_packets);
9946         ESTAT_ADD(tx_carrier_sense_errors);
9947         ESTAT_ADD(tx_discards);
9948         ESTAT_ADD(tx_errors);
9949
9950         ESTAT_ADD(dma_writeq_full);
9951         ESTAT_ADD(dma_write_prioq_full);
9952         ESTAT_ADD(rxbds_empty);
9953         ESTAT_ADD(rx_discards);
9954         ESTAT_ADD(rx_errors);
9955         ESTAT_ADD(rx_threshold_hit);
9956
9957         ESTAT_ADD(dma_readq_full);
9958         ESTAT_ADD(dma_read_prioq_full);
9959         ESTAT_ADD(tx_comp_queue_full);
9960
9961         ESTAT_ADD(ring_set_send_prod_index);
9962         ESTAT_ADD(ring_status_update);
9963         ESTAT_ADD(nic_irqs);
9964         ESTAT_ADD(nic_avoided_irqs);
9965         ESTAT_ADD(nic_tx_threshold_hit);
9966
9967         ESTAT_ADD(mbuf_lwm_thresh_hit);
9968
9969         return estats;
9970 }
9971
9972 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9973                                                  struct rtnl_link_stats64 *stats)
9974 {
9975         struct tg3 *tp = netdev_priv(dev);
9976         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9977         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9978
9979         if (!hw_stats)
9980                 return old_stats;
9981
9982         stats->rx_packets = old_stats->rx_packets +
9983                 get_stat64(&hw_stats->rx_ucast_packets) +
9984                 get_stat64(&hw_stats->rx_mcast_packets) +
9985                 get_stat64(&hw_stats->rx_bcast_packets);
9986
9987         stats->tx_packets = old_stats->tx_packets +
9988                 get_stat64(&hw_stats->tx_ucast_packets) +
9989                 get_stat64(&hw_stats->tx_mcast_packets) +
9990                 get_stat64(&hw_stats->tx_bcast_packets);
9991
9992         stats->rx_bytes = old_stats->rx_bytes +
9993                 get_stat64(&hw_stats->rx_octets);
9994         stats->tx_bytes = old_stats->tx_bytes +
9995                 get_stat64(&hw_stats->tx_octets);
9996
9997         stats->rx_errors = old_stats->rx_errors +
9998                 get_stat64(&hw_stats->rx_errors);
9999         stats->tx_errors = old_stats->tx_errors +
10000                 get_stat64(&hw_stats->tx_errors) +
10001                 get_stat64(&hw_stats->tx_mac_errors) +
10002                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10003                 get_stat64(&hw_stats->tx_discards);
10004
10005         stats->multicast = old_stats->multicast +
10006                 get_stat64(&hw_stats->rx_mcast_packets);
10007         stats->collisions = old_stats->collisions +
10008                 get_stat64(&hw_stats->tx_collisions);
10009
10010         stats->rx_length_errors = old_stats->rx_length_errors +
10011                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10012                 get_stat64(&hw_stats->rx_undersize_packets);
10013
10014         stats->rx_over_errors = old_stats->rx_over_errors +
10015                 get_stat64(&hw_stats->rxbds_empty);
10016         stats->rx_frame_errors = old_stats->rx_frame_errors +
10017                 get_stat64(&hw_stats->rx_align_errors);
10018         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10019                 get_stat64(&hw_stats->tx_discards);
10020         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10021                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10022
10023         stats->rx_crc_errors = old_stats->rx_crc_errors +
10024                 calc_crc_errors(tp);
10025
10026         stats->rx_missed_errors = old_stats->rx_missed_errors +
10027                 get_stat64(&hw_stats->rx_discards);
10028
10029         stats->rx_dropped = tp->rx_dropped;
10030         stats->tx_dropped = tp->tx_dropped;
10031
10032         return stats;
10033 }
10034
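/* Bit-serial, LSB-first CRC-32 using the reflected Ethernet polynomial
 * 0xedb88320, with the standard 0xffffffff preset and final inversion;
 * e.g. calc_crc("123456789", 9) yields the usual check value 0xcbf43926.
 */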
10035 static inline u32 calc_crc(unsigned char *buf, int len)
10036 {
10037         u32 reg;
10038         u32 tmp;
10039         int j, k;
10040
10041         reg = 0xffffffff;
10042
10043         for (j = 0; j < len; j++) {
10044                 reg ^= buf[j];
10045
10046                 for (k = 0; k < 8; k++) {
10047                         tmp = reg & 0x01;
10048
10049                         reg >>= 1;
10050
10051                         if (tmp)
10052                                 reg ^= 0xedb88320;
10053                 }
10054         }
10055
10056         return ~reg;
10057 }
10058
10059 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10060 {
10061         /* accept or reject all multicast frames */
10062         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10063         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10064         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10065         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10066 }
10067
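/* Illustrative sketch, not driver code: how __tg3_set_rx_mode() below maps
 * a multicast address onto the 128-bit hash filter programmed through
 * MAC_HASH_REG_0..3.  The low seven bits of the inverted CRC select one of
 * 128 filter bits; bits 6:5 pick the register and bits 4:0 the bit within
 * it.  The helper name is hypothetical.
 */
static inline void tg3_example_hash_pos(u8 *addr, u32 *regidx, u32 *bitpos)
{
        u32 bit = ~calc_crc(addr, ETH_ALEN) & 0x7f;

        *regidx = (bit & 0x60) >> 5;    /* which MAC_HASH_REG_N */
        *bitpos = bit & 0x1f;           /* which bit within that register */
}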
10068 static void __tg3_set_rx_mode(struct net_device *dev)
10069 {
10070         struct tg3 *tp = netdev_priv(dev);
10071         u32 rx_mode;
10072
10073         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10074                                   RX_MODE_KEEP_VLAN_TAG);
10075
10076 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10077         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10078          * flag clear.
10079          */
10080         if (!tg3_flag(tp, ENABLE_ASF))
10081                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10082 #endif
10083
10084         if (dev->flags & IFF_PROMISC) {
10085                 /* Promiscuous mode. */
10086                 rx_mode |= RX_MODE_PROMISC;
10087         } else if (dev->flags & IFF_ALLMULTI) {
10088                 /* Accept all multicast. */
10089                 tg3_set_multi(tp, 1);
10090         } else if (netdev_mc_empty(dev)) {
10091                 /* Reject all multicast. */
10092                 tg3_set_multi(tp, 0);
10093         } else {
10094                 /* Accept one or more multicast addresses. */
10095                 struct netdev_hw_addr *ha;
10096                 u32 mc_filter[4] = { 0, };
10097                 u32 regidx;
10098                 u32 bit;
10099                 u32 crc;
10100
10101                 netdev_for_each_mc_addr(ha, dev) {
10102                         crc = calc_crc(ha->addr, ETH_ALEN);
10103                         bit = ~crc & 0x7f;
10104                         regidx = (bit & 0x60) >> 5;
10105                         bit &= 0x1f;
10106                         mc_filter[regidx] |= (1 << bit);
10107                 }
10108
10109                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10110                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10111                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10112                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10113         }
10114
10115         if (rx_mode != tp->rx_mode) {
10116                 tp->rx_mode = rx_mode;
10117                 tw32_f(MAC_RX_MODE, rx_mode);
10118                 udelay(10);
10119         }
10120 }
10121
10122 static void tg3_set_rx_mode(struct net_device *dev)
10123 {
10124         struct tg3 *tp = netdev_priv(dev);
10125
10126         if (!netif_running(dev))
10127                 return;
10128
10129         tg3_full_lock(tp, 0);
10130         __tg3_set_rx_mode(dev);
10131         tg3_full_unlock(tp);
10132 }
10133
10134 static int tg3_get_regs_len(struct net_device *dev)
10135 {
10136         return TG3_REG_BLK_SIZE;
10137 }
10138
10139 static void tg3_get_regs(struct net_device *dev,
10140                 struct ethtool_regs *regs, void *_p)
10141 {
10142         struct tg3 *tp = netdev_priv(dev);
10143
10144         regs->version = 0;
10145
10146         memset(_p, 0, TG3_REG_BLK_SIZE);
10147
10148         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10149                 return;
10150
10151         tg3_full_lock(tp, 0);
10152
10153         tg3_dump_legacy_regs(tp, (u32 *)_p);
10154
10155         tg3_full_unlock(tp);
10156 }
10157
10158 static int tg3_get_eeprom_len(struct net_device *dev)
10159 {
10160         struct tg3 *tp = netdev_priv(dev);
10161
10162         return tp->nvram_size;
10163 }
10164
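/* NVRAM is accessed in 32-bit words, so an arbitrary (offset, len) ethtool
 * request is split into an unaligned head carved out of the word holding
 * 'offset', a run of whole words, and an unaligned tail.  For example,
 * offset=2 len=8 becomes two bytes of the word at 0, the whole word at 4,
 * and two bytes of the word at 8.
 */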
10165 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10166 {
10167         struct tg3 *tp = netdev_priv(dev);
10168         int ret;
10169         u8  *pd;
10170         u32 i, offset, len, b_offset, b_count;
10171         __be32 val;
10172
10173         if (tg3_flag(tp, NO_NVRAM))
10174                 return -EINVAL;
10175
10176         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10177                 return -EAGAIN;
10178
10179         offset = eeprom->offset;
10180         len = eeprom->len;
10181         eeprom->len = 0;
10182
10183         eeprom->magic = TG3_EEPROM_MAGIC;
10184
10185         if (offset & 3) {
10186                 /* adjustments to start on required 4 byte boundary */
10187                 b_offset = offset & 3;
10188                 b_count = 4 - b_offset;
10189                 if (b_count > len) {
10190                         /* i.e. offset=1 len=2 */
10191                         b_count = len;
10192                 }
10193                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10194                 if (ret)
10195                         return ret;
10196                 memcpy(data, ((char *)&val) + b_offset, b_count);
10197                 len -= b_count;
10198                 offset += b_count;
10199                 eeprom->len += b_count;
10200         }
10201
10202         /* read bytes up to the last 4 byte boundary */
10203         pd = &data[eeprom->len];
10204         for (i = 0; i < (len - (len & 3)); i += 4) {
10205                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10206                 if (ret) {
10207                         eeprom->len += i;
10208                         return ret;
10209                 }
10210                 memcpy(pd + i, &val, 4);
10211         }
10212         eeprom->len += i;
10213
10214         if (len & 3) {
10215                 /* read last bytes not ending on 4 byte boundary */
10216                 pd = &data[eeprom->len];
10217                 b_count = len & 3;
10218                 b_offset = offset + len - b_count;
10219                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10220                 if (ret)
10221                         return ret;
10222                 memcpy(pd, &val, b_count);
10223                 eeprom->len += b_count;
10224         }
10225         return 0;
10226 }
10227
10228 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10229
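/* Writes are likewise widened to whole 32-bit words: the partially
 * overwritten words at the start and end are read back first and merged
 * with the caller's bytes in a bounce buffer before the block write.
 */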
10230 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10231 {
10232         struct tg3 *tp = netdev_priv(dev);
10233         int ret;
10234         u32 offset, len, b_offset, odd_len;
10235         u8 *buf;
10236         __be32 start, end;
10237
10238         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10239                 return -EAGAIN;
10240
10241         if (tg3_flag(tp, NO_NVRAM) ||
10242             eeprom->magic != TG3_EEPROM_MAGIC)
10243                 return -EINVAL;
10244
10245         offset = eeprom->offset;
10246         len = eeprom->len;
10247
10248         if ((b_offset = (offset & 3))) {
10249                 /* adjustments to start on required 4 byte boundary */
10250                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10251                 if (ret)
10252                         return ret;
10253                 len += b_offset;
10254                 offset &= ~3;
10255                 if (len < 4)
10256                         len = 4;
10257         }
10258
10259         odd_len = 0;
10260         if (len & 3) {
10261                 /* adjustments to end on required 4 byte boundary */
10262                 odd_len = 1;
10263                 len = (len + 3) & ~3;
10264                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10265                 if (ret)
10266                         return ret;
10267         }
10268
10269         buf = data;
10270         if (b_offset || odd_len) {
10271                 buf = kmalloc(len, GFP_KERNEL);
10272                 if (!buf)
10273                         return -ENOMEM;
10274                 if (b_offset)
10275                         memcpy(buf, &start, 4);
10276                 if (odd_len)
10277                         memcpy(buf+len-4, &end, 4);
10278                 memcpy(buf + b_offset, data, eeprom->len);
10279         }
10280
10281         ret = tg3_nvram_write_block(tp, offset, len, buf);
10282
10283         if (buf != data)
10284                 kfree(buf);
10285
10286         return ret;
10287 }
10288
10289 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10290 {
10291         struct tg3 *tp = netdev_priv(dev);
10292
10293         if (tg3_flag(tp, USE_PHYLIB)) {
10294                 struct phy_device *phydev;
10295                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10296                         return -EAGAIN;
10297                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10298                 return phy_ethtool_gset(phydev, cmd);
10299         }
10300
10301         cmd->supported = (SUPPORTED_Autoneg);
10302
10303         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10304                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10305                                    SUPPORTED_1000baseT_Full);
10306
10307         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10308                 cmd->supported |= (SUPPORTED_100baseT_Half |
10309                                   SUPPORTED_100baseT_Full |
10310                                   SUPPORTED_10baseT_Half |
10311                                   SUPPORTED_10baseT_Full |
10312                                   SUPPORTED_TP);
10313                 cmd->port = PORT_TP;
10314         } else {
10315                 cmd->supported |= SUPPORTED_FIBRE;
10316                 cmd->port = PORT_FIBRE;
10317         }
10318
10319         cmd->advertising = tp->link_config.advertising;
10320         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10321                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10322                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10323                                 cmd->advertising |= ADVERTISED_Pause;
10324                         } else {
10325                                 cmd->advertising |= ADVERTISED_Pause |
10326                                                     ADVERTISED_Asym_Pause;
10327                         }
10328                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10329                         cmd->advertising |= ADVERTISED_Asym_Pause;
10330                 }
10331         }
10332         if (netif_running(dev) && netif_carrier_ok(dev)) {
10333                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10334                 cmd->duplex = tp->link_config.active_duplex;
10335                 cmd->lp_advertising = tp->link_config.rmt_adv;
10336                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10337                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10338                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10339                         else
10340                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10341                 }
10342         } else {
10343                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10344                 cmd->duplex = DUPLEX_INVALID;
10345                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10346         }
10347         cmd->phy_address = tp->phy_addr;
10348         cmd->transceiver = XCVR_INTERNAL;
10349         cmd->autoneg = tp->link_config.autoneg;
10350         cmd->maxtxpkt = 0;
10351         cmd->maxrxpkt = 0;
10352         return 0;
10353 }
10354
10355 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10356 {
10357         struct tg3 *tp = netdev_priv(dev);
10358         u32 speed = ethtool_cmd_speed(cmd);
10359
10360         if (tg3_flag(tp, USE_PHYLIB)) {
10361                 struct phy_device *phydev;
10362                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10363                         return -EAGAIN;
10364                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10365                 return phy_ethtool_sset(phydev, cmd);
10366         }
10367
10368         if (cmd->autoneg != AUTONEG_ENABLE &&
10369             cmd->autoneg != AUTONEG_DISABLE)
10370                 return -EINVAL;
10371
10372         if (cmd->autoneg == AUTONEG_DISABLE &&
10373             cmd->duplex != DUPLEX_FULL &&
10374             cmd->duplex != DUPLEX_HALF)
10375                 return -EINVAL;
10376
10377         if (cmd->autoneg == AUTONEG_ENABLE) {
10378                 u32 mask = ADVERTISED_Autoneg |
10379                            ADVERTISED_Pause |
10380                            ADVERTISED_Asym_Pause;
10381
10382                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10383                         mask |= ADVERTISED_1000baseT_Half |
10384                                 ADVERTISED_1000baseT_Full;
10385
10386                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10387                         mask |= ADVERTISED_100baseT_Half |
10388                                 ADVERTISED_100baseT_Full |
10389                                 ADVERTISED_10baseT_Half |
10390                                 ADVERTISED_10baseT_Full |
10391                                 ADVERTISED_TP;
10392                 else
10393                         mask |= ADVERTISED_FIBRE;
10394
10395                 if (cmd->advertising & ~mask)
10396                         return -EINVAL;
10397
10398                 mask &= (ADVERTISED_1000baseT_Half |
10399                          ADVERTISED_1000baseT_Full |
10400                          ADVERTISED_100baseT_Half |
10401                          ADVERTISED_100baseT_Full |
10402                          ADVERTISED_10baseT_Half |
10403                          ADVERTISED_10baseT_Full);
10404
10405                 cmd->advertising &= mask;
10406         } else {
10407                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10408                         if (speed != SPEED_1000)
10409                                 return -EINVAL;
10410
10411                         if (cmd->duplex != DUPLEX_FULL)
10412                                 return -EINVAL;
10413                 } else {
10414                         if (speed != SPEED_100 &&
10415                             speed != SPEED_10)
10416                                 return -EINVAL;
10417                 }
10418         }
10419
10420         tg3_full_lock(tp, 0);
10421
10422         tp->link_config.autoneg = cmd->autoneg;
10423         if (cmd->autoneg == AUTONEG_ENABLE) {
10424                 tp->link_config.advertising = (cmd->advertising |
10425                                               ADVERTISED_Autoneg);
10426                 tp->link_config.speed = SPEED_INVALID;
10427                 tp->link_config.duplex = DUPLEX_INVALID;
10428         } else {
10429                 tp->link_config.advertising = 0;
10430                 tp->link_config.speed = speed;
10431                 tp->link_config.duplex = cmd->duplex;
10432         }
10433
10434         tp->link_config.orig_speed = tp->link_config.speed;
10435         tp->link_config.orig_duplex = tp->link_config.duplex;
10436         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10437
10438         if (netif_running(dev))
10439                 tg3_setup_phy(tp, 1);
10440
10441         tg3_full_unlock(tp);
10442
10443         return 0;
10444 }
10445
10446 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10447 {
10448         struct tg3 *tp = netdev_priv(dev);
10449
10450         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10451         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10452         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10453         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10454 }
10455
10456 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10457 {
10458         struct tg3 *tp = netdev_priv(dev);
10459
10460         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10461                 wol->supported = WAKE_MAGIC;
10462         else
10463                 wol->supported = 0;
10464         wol->wolopts = 0;
10465         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10466                 wol->wolopts = WAKE_MAGIC;
10467         memset(&wol->sopass, 0, sizeof(wol->sopass));
10468 }
10469
10470 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10471 {
10472         struct tg3 *tp = netdev_priv(dev);
10473         struct device *dp = &tp->pdev->dev;
10474
10475         if (wol->wolopts & ~WAKE_MAGIC)
10476                 return -EINVAL;
10477         if ((wol->wolopts & WAKE_MAGIC) &&
10478             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10479                 return -EINVAL;
10480
10481         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10482
10483         spin_lock_bh(&tp->lock);
10484         if (device_may_wakeup(dp))
10485                 tg3_flag_set(tp, WOL_ENABLE);
10486         else
10487                 tg3_flag_clear(tp, WOL_ENABLE);
10488         spin_unlock_bh(&tp->lock);
10489
10490         return 0;
10491 }
10492
10493 static u32 tg3_get_msglevel(struct net_device *dev)
10494 {
10495         struct tg3 *tp = netdev_priv(dev);
10496         return tp->msg_enable;
10497 }
10498
10499 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10500 {
10501         struct tg3 *tp = netdev_priv(dev);
10502         tp->msg_enable = value;
10503 }
10504
10505 static int tg3_nway_reset(struct net_device *dev)
10506 {
10507         struct tg3 *tp = netdev_priv(dev);
10508         int r;
10509
10510         if (!netif_running(dev))
10511                 return -EAGAIN;
10512
10513         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10514                 return -EINVAL;
10515
10516         if (tg3_flag(tp, USE_PHYLIB)) {
10517                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10518                         return -EAGAIN;
10519                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10520         } else {
10521                 u32 bmcr;
10522
10523                 spin_lock_bh(&tp->lock);
10524                 r = -EINVAL;
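                /* The back-to-back BMCR reads are intentional here; the
                 * first read apparently flushes a stale latched value.
                 */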
10525                 tg3_readphy(tp, MII_BMCR, &bmcr);
10526                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10527                     ((bmcr & BMCR_ANENABLE) ||
10528                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10529                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10530                                                    BMCR_ANENABLE);
10531                         r = 0;
10532                 }
10533                 spin_unlock_bh(&tp->lock);
10534         }
10535
10536         return r;
10537 }
10538
10539 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10540 {
10541         struct tg3 *tp = netdev_priv(dev);
10542
10543         ering->rx_max_pending = tp->rx_std_ring_mask;
10544         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10545                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10546         else
10547                 ering->rx_jumbo_max_pending = 0;
10548
10549         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10550
10551         ering->rx_pending = tp->rx_pending;
10552         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10553                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10554         else
10555                 ering->rx_jumbo_pending = 0;
10556
10557         ering->tx_pending = tp->napi[0].tx_pending;
10558 }
10559
10560 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10561 {
10562         struct tg3 *tp = netdev_priv(dev);
10563         int i, irq_sync = 0, err = 0;
10564
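        /* The TX ring must hold more than MAX_SKB_FRAGS descriptors so a
         * maximally fragmented skb can always fit; chips with the TSO bug
         * apparently need triple that headroom.
         */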
10565         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10566             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10567             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10568             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10569             (tg3_flag(tp, TSO_BUG) &&
10570              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10571                 return -EINVAL;
10572
10573         if (netif_running(dev)) {
10574                 tg3_phy_stop(tp);
10575                 tg3_netif_stop(tp);
10576                 irq_sync = 1;
10577         }
10578
10579         tg3_full_lock(tp, irq_sync);
10580
10581         tp->rx_pending = ering->rx_pending;
10582
10583         if (tg3_flag(tp, MAX_RXPEND_64) &&
10584             tp->rx_pending > 63)
10585                 tp->rx_pending = 63;
10586         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10587
10588         for (i = 0; i < tp->irq_max; i++)
10589                 tp->napi[i].tx_pending = ering->tx_pending;
10590
10591         if (netif_running(dev)) {
10592                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10593                 err = tg3_restart_hw(tp, 1);
10594                 if (!err)
10595                         tg3_netif_start(tp);
10596         }
10597
10598         tg3_full_unlock(tp);
10599
10600         if (irq_sync && !err)
10601                 tg3_phy_start(tp);
10602
10603         return err;
10604 }
10605
10606 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10607 {
10608         struct tg3 *tp = netdev_priv(dev);
10609
10610         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10611
10612         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10613                 epause->rx_pause = 1;
10614         else
10615                 epause->rx_pause = 0;
10616
10617         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10618                 epause->tx_pause = 1;
10619         else
10620                 epause->tx_pause = 0;
10621 }
10622
10623 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10624 {
10625         struct tg3 *tp = netdev_priv(dev);
10626         int err = 0;
10627
10628         if (tg3_flag(tp, USE_PHYLIB)) {
10629                 u32 newadv;
10630                 struct phy_device *phydev;
10631
10632                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10633
10634                 if (!(phydev->supported & SUPPORTED_Pause) ||
10635                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10636                      (epause->rx_pause != epause->tx_pause)))
10637                         return -EINVAL;
10638
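                /* Standard 802.3 pause autoneg encoding: symmetric pause
                 * advertises Pause, RX-only advertises Pause | Asym_Pause,
                 * and TX-only advertises Asym_Pause alone.
                 */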
10639                 tp->link_config.flowctrl = 0;
10640                 if (epause->rx_pause) {
10641                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10642
10643                         if (epause->tx_pause) {
10644                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10645                                 newadv = ADVERTISED_Pause;
10646                         } else
10647                                 newadv = ADVERTISED_Pause |
10648                                          ADVERTISED_Asym_Pause;
10649                 } else if (epause->tx_pause) {
10650                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10651                         newadv = ADVERTISED_Asym_Pause;
10652                 } else
10653                         newadv = 0;
10654
10655                 if (epause->autoneg)
10656                         tg3_flag_set(tp, PAUSE_AUTONEG);
10657                 else
10658                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10659
10660                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10661                         u32 oldadv = phydev->advertising &
10662                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10663                         if (oldadv != newadv) {
10664                                 phydev->advertising &=
10665                                         ~(ADVERTISED_Pause |
10666                                           ADVERTISED_Asym_Pause);
10667                                 phydev->advertising |= newadv;
10668                                 if (phydev->autoneg) {
10669                                         /*
10670                                          * Always renegotiate the link to
10671                                          * inform our link partner of our
10672                                          * flow control settings, even if the
10673                                          * flow control is forced.  Let
10674                                          * tg3_adjust_link() do the final
10675                                          * flow control setup.
10676                                          */
10677                                         return phy_start_aneg(phydev);
10678                                 }
10679                         }
10680
10681                         if (!epause->autoneg)
10682                                 tg3_setup_flow_control(tp, 0, 0);
10683                 } else {
10684                         tp->link_config.orig_advertising &=
10685                                         ~(ADVERTISED_Pause |
10686                                           ADVERTISED_Asym_Pause);
10687                         tp->link_config.orig_advertising |= newadv;
10688                 }
10689         } else {
10690                 int irq_sync = 0;
10691
10692                 if (netif_running(dev)) {
10693                         tg3_netif_stop(tp);
10694                         irq_sync = 1;
10695                 }
10696
10697                 tg3_full_lock(tp, irq_sync);
10698
10699                 if (epause->autoneg)
10700                         tg3_flag_set(tp, PAUSE_AUTONEG);
10701                 else
10702                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10703                 if (epause->rx_pause)
10704                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10705                 else
10706                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10707                 if (epause->tx_pause)
10708                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10709                 else
10710                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10711
10712                 if (netif_running(dev)) {
10713                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10714                         err = tg3_restart_hw(tp, 1);
10715                         if (!err)
10716                                 tg3_netif_start(tp);
10717                 }
10718
10719                 tg3_full_unlock(tp);
10720         }
10721
10722         return err;
10723 }
10724
10725 static int tg3_get_sset_count(struct net_device *dev, int sset)
10726 {
10727         switch (sset) {
10728         case ETH_SS_TEST:
10729                 return TG3_NUM_TEST;
10730         case ETH_SS_STATS:
10731                 return TG3_NUM_STATS;
10732         default:
10733                 return -EOPNOTSUPP;
10734         }
10735 }
10736
10737 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10738                          u32 *rules __always_unused)
10739 {
10740         struct tg3 *tp = netdev_priv(dev);
10741
10742         if (!tg3_flag(tp, SUPPORT_MSIX))
10743                 return -EOPNOTSUPP;
10744
10745         switch (info->cmd) {
10746         case ETHTOOL_GRXRINGS:
10747                 if (netif_running(tp->dev))
10748                         info->data = tp->irq_cnt;
10749                 else {
10750                         info->data = num_online_cpus();
10751                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10752                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10753                 }
10754
10755                 /* The first interrupt vector only
10756                  * handles link interrupts.
10757                  */
10758                 info->data -= 1;
10759                 return 0;
10760
10761         default:
10762                 return -EOPNOTSUPP;
10763         }
10764 }
10765
10766 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10767 {
10768         u32 size = 0;
10769         struct tg3 *tp = netdev_priv(dev);
10770
10771         if (tg3_flag(tp, SUPPORT_MSIX))
10772                 size = TG3_RSS_INDIR_TBL_SIZE;
10773
10774         return size;
10775 }
10776
10777 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10778 {
10779         struct tg3 *tp = netdev_priv(dev);
10780         int i;
10781
10782         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10783                 indir[i] = tp->rss_ind_tbl[i];
10784
10785         return 0;
10786 }
10787
10788 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10789 {
10790         struct tg3 *tp = netdev_priv(dev);
10791         size_t i;
10792
10793         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10794                 tp->rss_ind_tbl[i] = indir[i];
10795
10796         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10797                 return 0;
10798
10799         /* It is legal to write the indirection
10800          * table while the device is running.
10801          */
10802         tg3_full_lock(tp, 0);
10803         tg3_rss_write_indir_tbl(tp);
10804         tg3_full_unlock(tp);
10805
10806         return 0;
10807 }
10808
10809 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10810 {
10811         switch (stringset) {
10812         case ETH_SS_STATS:
10813                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10814                 break;
10815         case ETH_SS_TEST:
10816                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10817                 break;
10818         default:
10819                 WARN_ON(1);     /* we need a WARN() */
10820                 break;
10821         }
10822 }
10823
10824 static int tg3_set_phys_id(struct net_device *dev,
10825                             enum ethtool_phys_id_state state)
10826 {
10827         struct tg3 *tp = netdev_priv(dev);
10828
10829         if (!netif_running(tp->dev))
10830                 return -EAGAIN;
10831
10832         switch (state) {
10833         case ETHTOOL_ID_ACTIVE:
10834                 return 1;       /* cycle on/off once per second */
10835
10836         case ETHTOOL_ID_ON:
10837                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10838                      LED_CTRL_1000MBPS_ON |
10839                      LED_CTRL_100MBPS_ON |
10840                      LED_CTRL_10MBPS_ON |
10841                      LED_CTRL_TRAFFIC_OVERRIDE |
10842                      LED_CTRL_TRAFFIC_BLINK |
10843                      LED_CTRL_TRAFFIC_LED);
10844                 break;
10845
10846         case ETHTOOL_ID_OFF:
10847                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10848                      LED_CTRL_TRAFFIC_OVERRIDE);
10849                 break;
10850
10851         case ETHTOOL_ID_INACTIVE:
10852                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10853                 break;
10854         }
10855
10856         return 0;
10857 }
10858
10859 static void tg3_get_ethtool_stats(struct net_device *dev,
10860                                    struct ethtool_stats *estats, u64 *tmp_stats)
10861 {
10862         struct tg3 *tp = netdev_priv(dev);
10863
10864         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10865 }
10866
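/* Locate and read the VPD block: chips with a valid NVRAM magic may point
 * at an extended VPD region through the NVM directory, falling back to the
 * fixed legacy offset; everything else is read through PCI config space
 * VPD, retrying up to three times when a read times out or is interrupted.
 */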
10867 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10868 {
10869         int i;
10870         __be32 *buf;
10871         u32 offset = 0, len = 0;
10872         u32 magic, val;
10873
10874         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10875                 return NULL;
10876
10877         if (magic == TG3_EEPROM_MAGIC) {
10878                 for (offset = TG3_NVM_DIR_START;
10879                      offset < TG3_NVM_DIR_END;
10880                      offset += TG3_NVM_DIRENT_SIZE) {
10881                         if (tg3_nvram_read(tp, offset, &val))
10882                                 return NULL;
10883
10884                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10885                             TG3_NVM_DIRTYPE_EXTVPD)
10886                                 break;
10887                 }
10888
10889                 if (offset != TG3_NVM_DIR_END) {
10890                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10891                         if (tg3_nvram_read(tp, offset + 4, &offset))
10892                                 return NULL;
10893
10894                         offset = tg3_nvram_logical_addr(tp, offset);
10895                 }
10896         }
10897
10898         if (!offset || !len) {
10899                 offset = TG3_NVM_VPD_OFF;
10900                 len = TG3_NVM_VPD_LEN;
10901         }
10902
10903         buf = kmalloc(len, GFP_KERNEL);
10904         if (buf == NULL)
10905                 return NULL;
10906
10907         if (magic == TG3_EEPROM_MAGIC) {
10908                 for (i = 0; i < len; i += 4) {
10909                         /* The data is in little-endian format in NVRAM.
10910                          * Use the big-endian read routines to preserve
10911                          * the byte order as it exists in NVRAM.
10912                          */
10913                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10914                                 goto error;
10915                 }
10916         } else {
10917                 u8 *ptr;
10918                 ssize_t cnt;
10919                 unsigned int pos = 0;
10920
10921                 ptr = (u8 *)&buf[0];
10922                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10923                         cnt = pci_read_vpd(tp->pdev, pos,
10924                                            len - pos, ptr);
10925                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10926                                 cnt = 0;
10927                         else if (cnt < 0)
10928                                 goto error;
10929                 }
10930                 if (pos != len)
10931                         goto error;
10932         }
10933
10934         *vpdlen = len;
10935
10936         return buf;
10937
10938 error:
10939         kfree(buf);
10940         return NULL;
10941 }
10942
10943 #define NVRAM_TEST_SIZE 0x100
10944 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10945 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10946 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10947 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10948 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10949 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10950 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10951 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10952
10953 static int tg3_test_nvram(struct tg3 *tp)
10954 {
10955         u32 csum, magic, len;
10956         __be32 *buf;
10957         int i, j, k, err = 0, size;
10958
10959         if (tg3_flag(tp, NO_NVRAM))
10960                 return 0;
10961
10962         if (tg3_nvram_read(tp, 0, &magic) != 0)
10963                 return -EIO;
10964
10965         if (magic == TG3_EEPROM_MAGIC)
10966                 size = NVRAM_TEST_SIZE;
10967         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10968                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10969                     TG3_EEPROM_SB_FORMAT_1) {
10970                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10971                         case TG3_EEPROM_SB_REVISION_0:
10972                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10973                                 break;
10974                         case TG3_EEPROM_SB_REVISION_2:
10975                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10976                                 break;
10977                         case TG3_EEPROM_SB_REVISION_3:
10978                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10979                                 break;
10980                         case TG3_EEPROM_SB_REVISION_4:
10981                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10982                                 break;
10983                         case TG3_EEPROM_SB_REVISION_5:
10984                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10985                                 break;
10986                         case TG3_EEPROM_SB_REVISION_6:
10987                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10988                                 break;
10989                         default:
10990                                 return -EIO;
10991                         }
10992                 } else
10993                         return 0;
10994         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10995                 size = NVRAM_SELFBOOT_HW_SIZE;
10996         else
10997                 return -EIO;
10998
10999         buf = kmalloc(size, GFP_KERNEL);
11000         if (buf == NULL)
11001                 return -ENOMEM;
11002
11003         err = -EIO;
11004         for (i = 0, j = 0; i < size; i += 4, j++) {
11005                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11006                 if (err)
11007                         break;
11008         }
11009         if (i < size)
11010                 goto out;
11011
11012         /* Selfboot format */
11013         magic = be32_to_cpu(buf[0]);
11014         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11015             TG3_EEPROM_MAGIC_FW) {
11016                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11017
11018                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11019                     TG3_EEPROM_SB_REVISION_2) {
11020                         /* For rev 2, the csum doesn't include the MBA. */
11021                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11022                                 csum8 += buf8[i];
11023                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11024                                 csum8 += buf8[i];
11025                 } else {
11026                         for (i = 0; i < size; i++)
11027                                 csum8 += buf8[i];
11028                 }
11029
11030                 if (csum8 == 0) {
11031                         err = 0;
11032                         goto out;
11033                 }
11034
11035                 err = -EIO;
11036                 goto out;
11037         }
11038
11039         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11040             TG3_EEPROM_MAGIC_HW) {
11041                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11042                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11043                 u8 *buf8 = (u8 *) buf;
11044
11045                 /* Separate the parity bits from the data bytes.  */
11046                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11047                         if ((i == 0) || (i == 8)) {
11048                                 int l;
11049                                 u8 msk;
11050
11051                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11052                                         parity[k++] = buf8[i] & msk;
11053                                 i++;
11054                         } else if (i == 16) {
11055                                 int l;
11056                                 u8 msk;
11057
11058                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11059                                         parity[k++] = buf8[i] & msk;
11060                                 i++;
11061
11062                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11063                                         parity[k++] = buf8[i] & msk;
11064                                 i++;
11065                         }
11066                         data[j++] = buf8[i];
11067                 }
11068
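                /* Every data byte must have odd parity when combined with
                 * its parity bit: odd-weight data pairs with a clear bit,
                 * even-weight data with a set bit.
                 */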
11069                 err = -EIO;
11070                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11071                         u8 hw8 = hweight8(data[i]);
11072
11073                         if ((hw8 & 0x1) && parity[i])
11074                                 goto out;
11075                         else if (!(hw8 & 0x1) && !parity[i])
11076                                 goto out;
11077                 }
11078                 err = 0;
11079                 goto out;
11080         }
11081
11082         err = -EIO;
11083
11084         /* Bootstrap checksum at offset 0x10 */
11085         csum = calc_crc((unsigned char *) buf, 0x10);
11086         if (csum != le32_to_cpu(buf[0x10/4]))
11087                 goto out;
11088
11089         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11090         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11091         if (csum != le32_to_cpu(buf[0xfc/4]))
11092                 goto out;
11093
11094         kfree(buf);
11095
11096         buf = tg3_vpd_readblock(tp, &len);
11097         if (!buf)
11098                 return -ENOMEM;
11099
11100         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11101         if (i > 0) {
11102                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11103                 if (j < 0)
11104                         goto out;
11105
11106                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11107                         goto out;
11108
11109                 i += PCI_VPD_LRDT_TAG_SIZE;
11110                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11111                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11112                 if (j > 0) {
11113                         u8 csum8 = 0;
11114
11115                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11116
11117                         for (i = 0; i <= j; i++)
11118                                 csum8 += ((u8 *)buf)[i];
11119
11120                         if (csum8)
11121                                 goto out;
11122                 }
11123         }
11124
11125         err = 0;
11126
11127 out:
11128         kfree(buf);
11129         return err;
11130 }
11131
11132 #define TG3_SERDES_TIMEOUT_SEC  2
11133 #define TG3_COPPER_TIMEOUT_SEC  6
11134
11135 static int tg3_test_link(struct tg3 *tp)
11136 {
11137         int i, max;
11138
11139         if (!netif_running(tp->dev))
11140                 return -ENODEV;
11141
11142         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11143                 max = TG3_SERDES_TIMEOUT_SEC;
11144         else
11145                 max = TG3_COPPER_TIMEOUT_SEC;
11146
11147         for (i = 0; i < max; i++) {
11148                 if (netif_carrier_ok(tp->dev))
11149                         return 0;
11150
11151                 if (msleep_interruptible(1000))
11152                         break;
11153         }
11154
11155         return -EIO;
11156 }
11157
11158 /* Only test the commonly used registers */
11159 static int tg3_test_registers(struct tg3 *tp)
11160 {
11161         int i, is_5705, is_5750;
11162         u32 offset, read_mask, write_mask, val, save_val, read_val;
11163         static struct {
11164                 u16 offset;
11165                 u16 flags;
11166 #define TG3_FL_5705     0x1
11167 #define TG3_FL_NOT_5705 0x2
11168 #define TG3_FL_NOT_5788 0x4
11169 #define TG3_FL_NOT_5750 0x8
11170                 u32 read_mask;
11171                 u32 write_mask;
11172         } reg_tbl[] = {
11173                 /* MAC Control Registers */
11174                 { MAC_MODE, TG3_FL_NOT_5705,
11175                         0x00000000, 0x00ef6f8c },
11176                 { MAC_MODE, TG3_FL_5705,
11177                         0x00000000, 0x01ef6b8c },
11178                 { MAC_STATUS, TG3_FL_NOT_5705,
11179                         0x03800107, 0x00000000 },
11180                 { MAC_STATUS, TG3_FL_5705,
11181                         0x03800100, 0x00000000 },
11182                 { MAC_ADDR_0_HIGH, 0x0000,
11183                         0x00000000, 0x0000ffff },
11184                 { MAC_ADDR_0_LOW, 0x0000,
11185                         0x00000000, 0xffffffff },
11186                 { MAC_RX_MTU_SIZE, 0x0000,
11187                         0x00000000, 0x0000ffff },
11188                 { MAC_TX_MODE, 0x0000,
11189                         0x00000000, 0x00000070 },
11190                 { MAC_TX_LENGTHS, 0x0000,
11191                         0x00000000, 0x00003fff },
11192                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11193                         0x00000000, 0x000007fc },
11194                 { MAC_RX_MODE, TG3_FL_5705,
11195                         0x00000000, 0x000007dc },
11196                 { MAC_HASH_REG_0, 0x0000,
11197                         0x00000000, 0xffffffff },
11198                 { MAC_HASH_REG_1, 0x0000,
11199                         0x00000000, 0xffffffff },
11200                 { MAC_HASH_REG_2, 0x0000,
11201                         0x00000000, 0xffffffff },
11202                 { MAC_HASH_REG_3, 0x0000,
11203                         0x00000000, 0xffffffff },
11204
11205                 /* Receive Data and Receive BD Initiator Control Registers. */
11206                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11207                         0x00000000, 0xffffffff },
11208                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11209                         0x00000000, 0xffffffff },
11210                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11211                         0x00000000, 0x00000003 },
11212                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11213                         0x00000000, 0xffffffff },
11214                 { RCVDBDI_STD_BD+0, 0x0000,
11215                         0x00000000, 0xffffffff },
11216                 { RCVDBDI_STD_BD+4, 0x0000,
11217                         0x00000000, 0xffffffff },
11218                 { RCVDBDI_STD_BD+8, 0x0000,
11219                         0x00000000, 0xffff0002 },
11220                 { RCVDBDI_STD_BD+0xc, 0x0000,
11221                         0x00000000, 0xffffffff },
11222
11223                 /* Receive BD Initiator Control Registers. */
11224                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11225                         0x00000000, 0xffffffff },
11226                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11227                         0x00000000, 0x000003ff },
11228                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11229                         0x00000000, 0xffffffff },
11230
11231                 /* Host Coalescing Control Registers. */
11232                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11233                         0x00000000, 0x00000004 },
11234                 { HOSTCC_MODE, TG3_FL_5705,
11235                         0x00000000, 0x000000f6 },
11236                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11237                         0x00000000, 0xffffffff },
11238                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11239                         0x00000000, 0x000003ff },
11240                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11241                         0x00000000, 0xffffffff },
11242                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11243                         0x00000000, 0x000003ff },
11244                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11245                         0x00000000, 0xffffffff },
11246                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11247                         0x00000000, 0x000000ff },
11248                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11249                         0x00000000, 0xffffffff },
11250                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11251                         0x00000000, 0x000000ff },
11252                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11253                         0x00000000, 0xffffffff },
11254                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11255                         0x00000000, 0xffffffff },
11256                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11257                         0x00000000, 0xffffffff },
11258                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11259                         0x00000000, 0x000000ff },
11260                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11261                         0x00000000, 0xffffffff },
11262                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11263                         0x00000000, 0x000000ff },
11264                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11265                         0x00000000, 0xffffffff },
11266                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11267                         0x00000000, 0xffffffff },
11268                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11269                         0x00000000, 0xffffffff },
11270                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11271                         0x00000000, 0xffffffff },
11272                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11273                         0x00000000, 0xffffffff },
11274                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11275                         0xffffffff, 0x00000000 },
11276                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11277                         0xffffffff, 0x00000000 },
11278
11279                 /* Buffer Manager Control Registers. */
11280                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11281                         0x00000000, 0x007fff80 },
11282                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11283                         0x00000000, 0x007fffff },
11284                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11285                         0x00000000, 0x0000003f },
11286                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11287                         0x00000000, 0x000001ff },
11288                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11289                         0x00000000, 0x000001ff },
11290                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11291                         0xffffffff, 0x00000000 },
11292                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11293                         0xffffffff, 0x00000000 },
11294
11295                 /* Mailbox Registers */
11296                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11297                         0x00000000, 0x000001ff },
11298                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11299                         0x00000000, 0x000001ff },
11300                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11301                         0x00000000, 0x000007ff },
11302                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11303                         0x00000000, 0x000001ff },
11304
11305                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11306         };
11307
11308         is_5705 = is_5750 = 0;
11309         if (tg3_flag(tp, 5705_PLUS)) {
11310                 is_5705 = 1;
11311                 if (tg3_flag(tp, 5750_PLUS))
11312                         is_5750 = 1;
11313         }
11314
11315         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11316                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11317                         continue;
11318
11319                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11320                         continue;
11321
11322                 if (tg3_flag(tp, IS_5788) &&
11323                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11324                         continue;
11325
11326                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11327                         continue;
11328
11329                 offset = (u32) reg_tbl[i].offset;
11330                 read_mask = reg_tbl[i].read_mask;
11331                 write_mask = reg_tbl[i].write_mask;
11332
11333                 /* Save the original register content */
11334                 save_val = tr32(offset);
11335
11336                 /* Determine the read-only value. */
11337                 read_val = save_val & read_mask;
11338
11339                 /* Write zero to the register, then make sure the read-only bits
11340                  * are not changed and the read/write bits are all zeros.
11341                  */
11342                 tw32(offset, 0);
11343
11344                 val = tr32(offset);
11345
11346                 /* Test the read-only and read/write bits. */
11347                 if (((val & read_mask) != read_val) || (val & write_mask))
11348                         goto out;
11349
11350                 /* Write ones to all the bits defined by read_mask and
11351                  * write_mask, then make sure the read-only bits are not
11352                  * changed and the read/write bits are all ones.
11353                  */
11354                 tw32(offset, read_mask | write_mask);
11355
11356                 val = tr32(offset);
11357
11358                 /* Test the read-only bits. */
11359                 if ((val & read_mask) != read_val)
11360                         goto out;
11361
11362                 /* Test the read/write bits. */
11363                 if ((val & write_mask) != write_mask)
11364                         goto out;
11365
11366                 tw32(offset, save_val);
11367         }
11368
11369         return 0;
11370
11371 out:
11372         if (netif_msg_hw(tp))
11373                 netdev_err(tp->dev,
11374                            "Register test failed at offset %x\n", offset);
11375         tw32(offset, save_val);
11376         return -EIO;
11377 }
11378
11379 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11380 {
11381         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11382         int i;
11383         u32 j;
11384
11385         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11386                 for (j = 0; j < len; j += 4) {
11387                         u32 val;
11388
11389                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11390                         tg3_read_mem(tp, offset + j, &val);
11391                         if (val != test_pattern[i])
11392                                 return -EIO;
11393                 }
11394         }
11395         return 0;
11396 }
11397
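/* Per-ASIC tables of internal SRAM windows to pattern-test, given as
 * (offset, length) pairs and terminated by an offset of 0xffffffff.
 */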
11398 static int tg3_test_memory(struct tg3 *tp)
11399 {
11400         static struct mem_entry {
11401                 u32 offset;
11402                 u32 len;
11403         } mem_tbl_570x[] = {
11404                 { 0x00000000, 0x00b50},
11405                 { 0x00002000, 0x1c000},
11406                 { 0xffffffff, 0x00000}
11407         }, mem_tbl_5705[] = {
11408                 { 0x00000100, 0x0000c},
11409                 { 0x00000200, 0x00008},
11410                 { 0x00004000, 0x00800},
11411                 { 0x00006000, 0x01000},
11412                 { 0x00008000, 0x02000},
11413                 { 0x00010000, 0x0e000},
11414                 { 0xffffffff, 0x00000}
11415         }, mem_tbl_5755[] = {
11416                 { 0x00000200, 0x00008},
11417                 { 0x00004000, 0x00800},
11418                 { 0x00006000, 0x00800},
11419                 { 0x00008000, 0x02000},
11420                 { 0x00010000, 0x0c000},
11421                 { 0xffffffff, 0x00000}
11422         }, mem_tbl_5906[] = {
11423                 { 0x00000200, 0x00008},
11424                 { 0x00004000, 0x00400},
11425                 { 0x00006000, 0x00400},
11426                 { 0x00008000, 0x01000},
11427                 { 0x00010000, 0x01000},
11428                 { 0xffffffff, 0x00000}
11429         }, mem_tbl_5717[] = {
11430                 { 0x00000200, 0x00008},
11431                 { 0x00010000, 0x0a000},
11432                 { 0x00020000, 0x13c00},
11433                 { 0xffffffff, 0x00000}
11434         }, mem_tbl_57765[] = {
11435                 { 0x00000200, 0x00008},
11436                 { 0x00004000, 0x00800},
11437                 { 0x00006000, 0x09800},
11438                 { 0x00010000, 0x0a000},
11439                 { 0xffffffff, 0x00000}
11440         };
11441         struct mem_entry *mem_tbl;
11442         int err = 0;
11443         int i;
11444
11445         if (tg3_flag(tp, 5717_PLUS))
11446                 mem_tbl = mem_tbl_5717;
11447         else if (tg3_flag(tp, 57765_CLASS))
11448                 mem_tbl = mem_tbl_57765;
11449         else if (tg3_flag(tp, 5755_PLUS))
11450                 mem_tbl = mem_tbl_5755;
11451         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11452                 mem_tbl = mem_tbl_5906;
11453         else if (tg3_flag(tp, 5705_PLUS))
11454                 mem_tbl = mem_tbl_5705;
11455         else
11456                 mem_tbl = mem_tbl_570x;
11457
11458         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11459                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11460                 if (err)
11461                         break;
11462         }
11463
11464         return err;
11465 }
11466
11467 #define TG3_TSO_MSS             500
11468
11469 #define TG3_TSO_IP_HDR_LEN      20
11470 #define TG3_TSO_TCP_HDR_LEN     20
11471 #define TG3_TSO_TCP_OPT_LEN     12
11472
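/* Canned ethertype + IPv4 + TCP header template for TSO loopback frames:
 * IPv4 10.0.0.1 -> 10.0.0.2 with DF set, followed by a 32-byte TCP
 * header (ACK set) ending in a timestamp option.  The IP total length
 * field and, on HW TSO parts, the TCP checksum are patched at runtime.
 */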
11473 static const u8 tg3_tso_header[] = {
11474 0x08, 0x00,
11475 0x45, 0x00, 0x00, 0x00,
11476 0x00, 0x00, 0x40, 0x00,
11477 0x40, 0x06, 0x00, 0x00,
11478 0x0a, 0x00, 0x00, 0x01,
11479 0x0a, 0x00, 0x00, 0x02,
11480 0x0d, 0x00, 0xe0, 0x00,
11481 0x00, 0x00, 0x01, 0x00,
11482 0x00, 0x00, 0x02, 0x00,
11483 0x80, 0x10, 0x10, 0x00,
11484 0x14, 0x09, 0x00, 0x00,
11485 0x01, 0x01, 0x08, 0x0a,
11486 0x11, 0x11, 0x11, 0x11,
11487 0x11, 0x11, 0x11, 0x11,
11488 };
11489
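/* Send one frame (or one TSO burst) to ourselves through the currently
 * selected loopback path, poll the status block until the tx consumer
 * and rx producer indices catch up, then verify every returned buffer
 * byte for byte.
 */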
11490 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11491 {
11492         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11493         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11494         u32 budget;
11495         struct sk_buff *skb;
11496         u8 *tx_data, *rx_data;
11497         dma_addr_t map;
11498         int num_pkts, tx_len, rx_len, i, err;
11499         struct tg3_rx_buffer_desc *desc;
11500         struct tg3_napi *tnapi, *rnapi;
11501         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11502
11503         tnapi = &tp->napi[0];
11504         rnapi = &tp->napi[0];
11505         if (tp->irq_cnt > 1) {
11506                 if (tg3_flag(tp, ENABLE_RSS))
11507                         rnapi = &tp->napi[1];
11508                 if (tg3_flag(tp, ENABLE_TSS))
11509                         tnapi = &tp->napi[1];
11510         }
11511         coal_now = tnapi->coal_now | rnapi->coal_now;
11512
11513         err = -EIO;
11514
11515         tx_len = pktsz;
11516         skb = netdev_alloc_skb(tp->dev, tx_len);
11517         if (!skb)
11518                 return -ENOMEM;
11519
11520         tx_data = skb_put(skb, tx_len);
11521         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11522         memset(tx_data + ETH_ALEN, 0x0, 8);
11523
11524         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11525
11526         if (tso_loopback) {
11527                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11528
11529                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11530                               TG3_TSO_TCP_OPT_LEN;
11531
11532                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11533                        sizeof(tg3_tso_header));
11534                 mss = TG3_TSO_MSS;
11535
11536                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11537                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11538
11539                 /* Set the total length field in the IP header */
11540                 iph->tot_len = htons((u16)(mss + hdr_len));
11541
11542                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11543                               TXD_FLAG_CPU_POST_DMA);
11544
11545                 if (tg3_flag(tp, HW_TSO_1) ||
11546                     tg3_flag(tp, HW_TSO_2) ||
11547                     tg3_flag(tp, HW_TSO_3)) {
11548                         struct tcphdr *th;
11549                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11550                         th = (struct tcphdr *)&tx_data[val];
11551                         th->check = 0;
11552                 } else
11553                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11554
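                /* Each TSO generation encodes the header length
                 * differently: HW_TSO_3 scatters it across the mss word
                 * and base_flags, HW_TSO_2 packs it into the upper mss
                 * bits, and older parts take only the TCP option length.
                 */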
11555                 if (tg3_flag(tp, HW_TSO_3)) {
11556                         mss |= (hdr_len & 0xc) << 12;
11557                         if (hdr_len & 0x10)
11558                                 base_flags |= 0x00000010;
11559                         base_flags |= (hdr_len & 0x3e0) << 5;
11560                 } else if (tg3_flag(tp, HW_TSO_2))
11561                         mss |= hdr_len << 9;
11562                 else if (tg3_flag(tp, HW_TSO_1) ||
11563                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11564                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11565                 } else {
11566                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11567                 }
11568
11569                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11570         } else {
11571                 num_pkts = 1;
11572                 data_off = ETH_HLEN;
11573         }
11574
11575         for (i = data_off; i < tx_len; i++)
11576                 tx_data[i] = (u8) (i & 0xff);
11577
11578         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11579         if (pci_dma_mapping_error(tp->pdev, map)) {
11580                 dev_kfree_skb(skb);
11581                 return -EIO;
11582         }
11583
11584         val = tnapi->tx_prod;
11585         tnapi->tx_buffers[val].skb = skb;
11586         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11587
11588         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11589                rnapi->coal_now);
11590
11591         udelay(10);
11592
11593         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11594
11595         budget = tg3_tx_avail(tnapi);
11596         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11597                             base_flags | TXD_FLAG_END, mss, 0)) {
11598                 tnapi->tx_buffers[val].skb = NULL;
11599                 dev_kfree_skb(skb);
11600                 return -EIO;
11601         }
11602
11603         tnapi->tx_prod++;
11604
11605         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11606         tr32_mailbox(tnapi->prodmbox);
11607
11608         udelay(10);
11609
11610         /* Poll for up to 350 usec to give slower 10/100 Mbps devices enough time. */
11611         for (i = 0; i < 35; i++) {
11612                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11613                        coal_now);
11614
11615                 udelay(10);
11616
11617                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11618                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11619                 if ((tx_idx == tnapi->tx_prod) &&
11620                     (rx_idx == (rx_start_idx + num_pkts)))
11621                         break;
11622         }
11623
11624         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11625         dev_kfree_skb(skb);
11626
11627         if (tx_idx != tnapi->tx_prod)
11628                 goto out;
11629
11630         if (rx_idx != rx_start_idx + num_pkts)
11631                 goto out;
11632
11633         val = data_off;
11634         while (rx_idx != rx_start_idx) {
11635                 desc = &rnapi->rx_rcb[rx_start_idx++];
11636                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11637                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11638
11639                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11640                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11641                         goto out;
11642
11643                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11644                          - ETH_FCS_LEN;
11645
11646                 if (!tso_loopback) {
11647                         if (rx_len != tx_len)
11648                                 goto out;
11649
11650                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11651                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11652                                         goto out;
11653                         } else {
11654                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11655                                         goto out;
11656                         }
11657                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11658                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11659                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11660                         goto out;
11661                 }
11662
11663                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11664                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11665                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11666                                              mapping);
11667                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11668                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11669                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11670                                              mapping);
11671                 } else
11672                         goto out;
11673
11674                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11675                                             PCI_DMA_FROMDEVICE);
11676
11677                 rx_data += TG3_RX_OFFSET(tp);
11678                 for (i = data_off; i < rx_len; i++, val++) {
11679                         if (*(rx_data + i) != (u8) (val & 0xff))
11680                                 goto out;
11681                 }
11682         }
11683
11684         err = 0;
11685
11686         /* tg3_free_rings will unmap and free the rx_data */
11687 out:
11688         return err;
11689 }
11690
11691 #define TG3_STD_LOOPBACK_FAILED         1
11692 #define TG3_JMB_LOOPBACK_FAILED         2
11693 #define TG3_TSO_LOOPBACK_FAILED         4
11694 #define TG3_LOOPBACK_FAILED \
11695         (TG3_STD_LOOPBACK_FAILED | \
11696          TG3_JMB_LOOPBACK_FAILED | \
11697          TG3_TSO_LOOPBACK_FAILED)
11698
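/* Run the loopback suite: MAC-internal loopback (skipped on 5780 and
 * CPMU-based parts, see the erratum note below), then PHY loopback and
 * optionally external loopback, each at standard, TSO and jumbo sizes.
 * Failures accumulate as TG3_*_LOOPBACK_FAILED bits in data[0..2].
 */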
11699 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11700 {
11701         int err = -EIO;
11702         u32 eee_cap;
11703
11704         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11705         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11706
11707         if (!netif_running(tp->dev)) {
11708                 data[0] = TG3_LOOPBACK_FAILED;
11709                 data[1] = TG3_LOOPBACK_FAILED;
11710                 if (do_extlpbk)
11711                         data[2] = TG3_LOOPBACK_FAILED;
11712                 goto done;
11713         }
11714
11715         err = tg3_reset_hw(tp, 1);
11716         if (err) {
11717                 data[0] = TG3_LOOPBACK_FAILED;
11718                 data[1] = TG3_LOOPBACK_FAILED;
11719                 if (do_extlpbk)
11720                         data[2] = TG3_LOOPBACK_FAILED;
11721                 goto done;
11722         }
11723
11724         if (tg3_flag(tp, ENABLE_RSS)) {
11725                 int i;
11726
11727                 /* Reroute all rx packets to the 1st queue */
11728                 for (i = MAC_RSS_INDIR_TBL_0;
11729                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11730                         tw32(i, 0x0);
11731         }
11732
11733         /* HW erratum - MAC loopback fails in some cases on 5780.
11734          * Normal traffic and PHY loopback are not affected by the
11735          * erratum.  Also, the MAC loopback test is deprecated for
11736          * all newer ASIC revisions.
11737          */
11738         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11739             !tg3_flag(tp, CPMU_PRESENT)) {
11740                 tg3_mac_loopback(tp, true);
11741
11742                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11743                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11744
11745                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11746                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11747                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11748
11749                 tg3_mac_loopback(tp, false);
11750         }
11751
11752         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11753             !tg3_flag(tp, USE_PHYLIB)) {
11754                 int i;
11755
11756                 tg3_phy_lpbk_set(tp, 0, false);
11757
11758                 /* Wait for link */
11759                 for (i = 0; i < 100; i++) {
11760                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11761                                 break;
11762                         mdelay(1);
11763                 }
11764
11765                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11766                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11767                 if (tg3_flag(tp, TSO_CAPABLE) &&
11768                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11769                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11770                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11771                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11772                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11773
11774                 if (do_extlpbk) {
11775                         tg3_phy_lpbk_set(tp, 0, true);
11776
11777                         /* All link indications report up, but the hardware
11778                          * isn't really ready for about 20 msec.  Double it
11779                          * to be sure.
11780                          */
11781                         mdelay(40);
11782
11783                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11784                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11785                         if (tg3_flag(tp, TSO_CAPABLE) &&
11786                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11787                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11788                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11789                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11790                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11791                 }
11792
11793                 /* Re-enable gphy autopowerdown. */
11794                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11795                         tg3_phy_toggle_apd(tp, true);
11796         }
11797
11798         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11799
11800 done:
11801         tp->phy_flags |= eee_cap;
11802
11803         return err;
11804 }
11805
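/* ethtool self-test entry point.  Result slots: data[0] nvram, data[1]
 * link, data[2] registers, data[3] memory, data[4..6] the loopback
 * words filled in by tg3_test_loopback(), data[7] interrupt.
 */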
11806 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11807                           u64 *data)
11808 {
11809         struct tg3 *tp = netdev_priv(dev);
11810         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11811
11812         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11813             tg3_power_up(tp)) {
11814                 etest->flags |= ETH_TEST_FL_FAILED;
11815                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11816                 return;
11817         }
11818
11819         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11820
11821         if (tg3_test_nvram(tp) != 0) {
11822                 etest->flags |= ETH_TEST_FL_FAILED;
11823                 data[0] = 1;
11824         }
11825         if (!doextlpbk && tg3_test_link(tp)) {
11826                 etest->flags |= ETH_TEST_FL_FAILED;
11827                 data[1] = 1;
11828         }
11829         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11830                 int err, err2 = 0, irq_sync = 0;
11831
11832                 if (netif_running(dev)) {
11833                         tg3_phy_stop(tp);
11834                         tg3_netif_stop(tp);
11835                         irq_sync = 1;
11836                 }
11837
11838                 tg3_full_lock(tp, irq_sync);
11839
11840                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11841                 err = tg3_nvram_lock(tp);
11842                 tg3_halt_cpu(tp, RX_CPU_BASE);
11843                 if (!tg3_flag(tp, 5705_PLUS))
11844                         tg3_halt_cpu(tp, TX_CPU_BASE);
11845                 if (!err)
11846                         tg3_nvram_unlock(tp);
11847
11848                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11849                         tg3_phy_reset(tp);
11850
11851                 if (tg3_test_registers(tp) != 0) {
11852                         etest->flags |= ETH_TEST_FL_FAILED;
11853                         data[2] = 1;
11854                 }
11855
11856                 if (tg3_test_memory(tp) != 0) {
11857                         etest->flags |= ETH_TEST_FL_FAILED;
11858                         data[3] = 1;
11859                 }
11860
11861                 if (doextlpbk)
11862                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11863
11864                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11865                         etest->flags |= ETH_TEST_FL_FAILED;
11866
11867                 tg3_full_unlock(tp);
11868
11869                 if (tg3_test_interrupt(tp) != 0) {
11870                         etest->flags |= ETH_TEST_FL_FAILED;
11871                         data[7] = 1;
11872                 }
11873
11874                 tg3_full_lock(tp, 0);
11875
11876                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11877                 if (netif_running(dev)) {
11878                         tg3_flag_set(tp, INIT_COMPLETE);
11879                         err2 = tg3_restart_hw(tp, 1);
11880                         if (!err2)
11881                                 tg3_netif_start(tp);
11882                 }
11883
11884                 tg3_full_unlock(tp);
11885
11886                 if (irq_sync && !err2)
11887                         tg3_phy_start(tp);
11888         }
11889         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11890                 tg3_power_down(tp);
11892 }
11893
11894 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11895 {
11896         struct mii_ioctl_data *data = if_mii(ifr);
11897         struct tg3 *tp = netdev_priv(dev);
11898         int err;
11899
11900         if (tg3_flag(tp, USE_PHYLIB)) {
11901                 struct phy_device *phydev;
11902                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11903                         return -EAGAIN;
11904                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11905                 return phy_mii_ioctl(phydev, ifr, cmd);
11906         }
11907
11908         switch (cmd) {
11909         case SIOCGMIIPHY:
11910                 data->phy_id = tp->phy_addr;
11911
11912                 /* fall through */
11913         case SIOCGMIIREG: {
11914                 u32 mii_regval;
11915
11916                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11917                         break;                  /* We have no PHY */
11918
11919                 if (!netif_running(dev))
11920                         return -EAGAIN;
11921
11922                 spin_lock_bh(&tp->lock);
11923                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11924                 spin_unlock_bh(&tp->lock);
11925
11926                 data->val_out = mii_regval;
11927
11928                 return err;
11929         }
11930
11931         case SIOCSMIIREG:
11932                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11933                         break;                  /* We have no PHY */
11934
11935                 if (!netif_running(dev))
11936                         return -EAGAIN;
11937
11938                 spin_lock_bh(&tp->lock);
11939                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11940                 spin_unlock_bh(&tp->lock);
11941
11942                 return err;
11943
11944         default:
11945                 /* do nothing */
11946                 break;
11947         }
11948         return -EOPNOTSUPP;
11949 }
11950
11951 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11952 {
11953         struct tg3 *tp = netdev_priv(dev);
11954
11955         memcpy(ec, &tp->coal, sizeof(*ec));
11956         return 0;
11957 }
11958
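/* The per-irq tick and stats-block coalescing knobs only exist on
 * pre-5705 hardware; on newer parts their limits stay zero, so any
 * nonzero request for them is rejected below.
 */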
11959 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11960 {
11961         struct tg3 *tp = netdev_priv(dev);
11962         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11963         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11964
11965         if (!tg3_flag(tp, 5705_PLUS)) {
11966                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11967                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11968                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11969                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11970         }
11971
11972         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11973             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11974             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11975             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11976             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11977             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11978             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11979             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11980             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11981             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11982                 return -EINVAL;
11983
11984         /* No rx interrupts will be generated if both are zero */
11985         if ((ec->rx_coalesce_usecs == 0) &&
11986             (ec->rx_max_coalesced_frames == 0))
11987                 return -EINVAL;
11988
11989         /* No tx interrupts will be generated if both are zero */
11990         if ((ec->tx_coalesce_usecs == 0) &&
11991             (ec->tx_max_coalesced_frames == 0))
11992                 return -EINVAL;
11993
11994         /* Only copy relevant parameters, ignore all others. */
11995         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11996         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11997         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11998         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11999         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12000         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12001         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12002         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12003         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12004
12005         if (netif_running(dev)) {
12006                 tg3_full_lock(tp, 0);
12007                 __tg3_set_coalesce(tp, &tp->coal);
12008                 tg3_full_unlock(tp);
12009         }
12010         return 0;
12011 }
12012
12013 static const struct ethtool_ops tg3_ethtool_ops = {
12014         .get_settings           = tg3_get_settings,
12015         .set_settings           = tg3_set_settings,
12016         .get_drvinfo            = tg3_get_drvinfo,
12017         .get_regs_len           = tg3_get_regs_len,
12018         .get_regs               = tg3_get_regs,
12019         .get_wol                = tg3_get_wol,
12020         .set_wol                = tg3_set_wol,
12021         .get_msglevel           = tg3_get_msglevel,
12022         .set_msglevel           = tg3_set_msglevel,
12023         .nway_reset             = tg3_nway_reset,
12024         .get_link               = ethtool_op_get_link,
12025         .get_eeprom_len         = tg3_get_eeprom_len,
12026         .get_eeprom             = tg3_get_eeprom,
12027         .set_eeprom             = tg3_set_eeprom,
12028         .get_ringparam          = tg3_get_ringparam,
12029         .set_ringparam          = tg3_set_ringparam,
12030         .get_pauseparam         = tg3_get_pauseparam,
12031         .set_pauseparam         = tg3_set_pauseparam,
12032         .self_test              = tg3_self_test,
12033         .get_strings            = tg3_get_strings,
12034         .set_phys_id            = tg3_set_phys_id,
12035         .get_ethtool_stats      = tg3_get_ethtool_stats,
12036         .get_coalesce           = tg3_get_coalesce,
12037         .set_coalesce           = tg3_set_coalesce,
12038         .get_sset_count         = tg3_get_sset_count,
12039         .get_rxnfc              = tg3_get_rxnfc,
12040         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12041         .get_rxfh_indir         = tg3_get_rxfh_indir,
12042         .set_rxfh_indir         = tg3_set_rxfh_indir,
12043 };
12044
12045 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12046 {
12047         u32 cursize, val, magic;
12048
12049         tp->nvram_size = EEPROM_CHIP_SIZE;
12050
12051         if (tg3_nvram_read(tp, 0, &magic) != 0)
12052                 return;
12053
12054         if ((magic != TG3_EEPROM_MAGIC) &&
12055             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12056             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12057                 return;
12058
12059         /*
12060          * Size the chip by reading offsets at increasing powers of two.
12061          * When we encounter our validation signature, we know the addressing
12062          * has wrapped around, and thus have our chip size.
12063          */
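        /* For example, on a 512-byte EEPROM the read at offset 0x200
         * aliases offset 0 and returns the magic value again, so the
         * loop below exits with cursize == 0x200.
         */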
12064         cursize = 0x10;
12065
12066         while (cursize < tp->nvram_size) {
12067                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12068                         return;
12069
12070                 if (val == magic)
12071                         break;
12072
12073                 cursize <<= 1;
12074         }
12075
12076         tp->nvram_size = cursize;
12077 }
12078
12079 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12080 {
12081         u32 val;
12082
12083         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12084                 return;
12085
12086         /* Selfboot format */
12087         if (val != TG3_EEPROM_MAGIC) {
12088                 tg3_get_eeprom_size(tp);
12089                 return;
12090         }
12091
12092         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12093                 if (val != 0) {
12094                         /* This is confusing.  We want to operate on the
12095                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12096                          * call will read from NVRAM and byteswap the data
12097                          * according to the byteswapping settings for all
12098                          * other register accesses.  This ensures the data we
12099                          * want will always reside in the lower 16-bits.
12100                          * However, the data in NVRAM is in LE format, which
12101                          * means the data from the NVRAM read will always be
12102                          * opposite the endianness of the CPU.  The 16-bit
12103                          * byteswap then brings the data to CPU endianness.
12104                          */
12105                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12106                         return;
12107                 }
12108         }
12109         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12110 }
12111
12112 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12113 {
12114         u32 nvcfg1;
12115
12116         nvcfg1 = tr32(NVRAM_CFG1);
12117         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12118                 tg3_flag_set(tp, FLASH);
12119         } else {
12120                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12121                 tw32(NVRAM_CFG1, nvcfg1);
12122         }
12123
12124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12125             tg3_flag(tp, 5780_CLASS)) {
12126                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12127                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12128                         tp->nvram_jedecnum = JEDEC_ATMEL;
12129                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12130                         tg3_flag_set(tp, NVRAM_BUFFERED);
12131                         break;
12132                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12133                         tp->nvram_jedecnum = JEDEC_ATMEL;
12134                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12135                         break;
12136                 case FLASH_VENDOR_ATMEL_EEPROM:
12137                         tp->nvram_jedecnum = JEDEC_ATMEL;
12138                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12139                         tg3_flag_set(tp, NVRAM_BUFFERED);
12140                         break;
12141                 case FLASH_VENDOR_ST:
12142                         tp->nvram_jedecnum = JEDEC_ST;
12143                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12144                         tg3_flag_set(tp, NVRAM_BUFFERED);
12145                         break;
12146                 case FLASH_VENDOR_SAIFUN:
12147                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12148                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12149                         break;
12150                 case FLASH_VENDOR_SST_SMALL:
12151                 case FLASH_VENDOR_SST_LARGE:
12152                         tp->nvram_jedecnum = JEDEC_SST;
12153                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12154                         break;
12155                 }
12156         } else {
12157                 tp->nvram_jedecnum = JEDEC_ATMEL;
12158                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12159                 tg3_flag_set(tp, NVRAM_BUFFERED);
12160         }
12161 }
12162
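/* Map the NVRAM strapping bits to a flash page size.  Only the 264- and
 * 528-byte AT45DB-style sizes use NVRAM address translation; for any
 * other size the callers set NO_NVRAM_ADDR_TRANS.
 */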
12163 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12164 {
12165         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12166         case FLASH_5752PAGE_SIZE_256:
12167                 tp->nvram_pagesize = 256;
12168                 break;
12169         case FLASH_5752PAGE_SIZE_512:
12170                 tp->nvram_pagesize = 512;
12171                 break;
12172         case FLASH_5752PAGE_SIZE_1K:
12173                 tp->nvram_pagesize = 1024;
12174                 break;
12175         case FLASH_5752PAGE_SIZE_2K:
12176                 tp->nvram_pagesize = 2048;
12177                 break;
12178         case FLASH_5752PAGE_SIZE_4K:
12179                 tp->nvram_pagesize = 4096;
12180                 break;
12181         case FLASH_5752PAGE_SIZE_264:
12182                 tp->nvram_pagesize = 264;
12183                 break;
12184         case FLASH_5752PAGE_SIZE_528:
12185                 tp->nvram_pagesize = 528;
12186                 break;
12187         }
12188 }
12189
12190 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12191 {
12192         u32 nvcfg1;
12193
12194         nvcfg1 = tr32(NVRAM_CFG1);
12195
12196         /* NVRAM protection for TPM */
12197         if (nvcfg1 & (1 << 27))
12198                 tg3_flag_set(tp, PROTECTED_NVRAM);
12199
12200         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12201         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12202         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12203                 tp->nvram_jedecnum = JEDEC_ATMEL;
12204                 tg3_flag_set(tp, NVRAM_BUFFERED);
12205                 break;
12206         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12207                 tp->nvram_jedecnum = JEDEC_ATMEL;
12208                 tg3_flag_set(tp, NVRAM_BUFFERED);
12209                 tg3_flag_set(tp, FLASH);
12210                 break;
12211         case FLASH_5752VENDOR_ST_M45PE10:
12212         case FLASH_5752VENDOR_ST_M45PE20:
12213         case FLASH_5752VENDOR_ST_M45PE40:
12214                 tp->nvram_jedecnum = JEDEC_ST;
12215                 tg3_flag_set(tp, NVRAM_BUFFERED);
12216                 tg3_flag_set(tp, FLASH);
12217                 break;
12218         }
12219
12220         if (tg3_flag(tp, FLASH)) {
12221                 tg3_nvram_get_pagesize(tp, nvcfg1);
12222         } else {
12223                 /* For eeprom, set pagesize to maximum eeprom size */
12224                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12225
12226                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12227                 tw32(NVRAM_CFG1, nvcfg1);
12228         }
12229 }
12230
12231 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12232 {
12233         u32 nvcfg1, protect = 0;
12234
12235         nvcfg1 = tr32(NVRAM_CFG1);
12236
12237         /* NVRAM protection for TPM */
12238         if (nvcfg1 & (1 << 27)) {
12239                 tg3_flag_set(tp, PROTECTED_NVRAM);
12240                 protect = 1;
12241         }
12242
12243         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12244         switch (nvcfg1) {
12245         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12246         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12247         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12248         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12249                 tp->nvram_jedecnum = JEDEC_ATMEL;
12250                 tg3_flag_set(tp, NVRAM_BUFFERED);
12251                 tg3_flag_set(tp, FLASH);
12252                 tp->nvram_pagesize = 264;
12253                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12254                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12255                         tp->nvram_size = (protect ? 0x3e200 :
12256                                           TG3_NVRAM_SIZE_512KB);
12257                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12258                         tp->nvram_size = (protect ? 0x1f200 :
12259                                           TG3_NVRAM_SIZE_256KB);
12260                 else
12261                         tp->nvram_size = (protect ? 0x1f200 :
12262                                           TG3_NVRAM_SIZE_128KB);
12263                 break;
12264         case FLASH_5752VENDOR_ST_M45PE10:
12265         case FLASH_5752VENDOR_ST_M45PE20:
12266         case FLASH_5752VENDOR_ST_M45PE40:
12267                 tp->nvram_jedecnum = JEDEC_ST;
12268                 tg3_flag_set(tp, NVRAM_BUFFERED);
12269                 tg3_flag_set(tp, FLASH);
12270                 tp->nvram_pagesize = 256;
12271                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12272                         tp->nvram_size = (protect ?
12273                                           TG3_NVRAM_SIZE_64KB :
12274                                           TG3_NVRAM_SIZE_128KB);
12275                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12276                         tp->nvram_size = (protect ?
12277                                           TG3_NVRAM_SIZE_64KB :
12278                                           TG3_NVRAM_SIZE_256KB);
12279                 else
12280                         tp->nvram_size = (protect ?
12281                                           TG3_NVRAM_SIZE_128KB :
12282                                           TG3_NVRAM_SIZE_512KB);
12283                 break;
12284         }
12285 }
12286
12287 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12288 {
12289         u32 nvcfg1;
12290
12291         nvcfg1 = tr32(NVRAM_CFG1);
12292
12293         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12294         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12295         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12296         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12297         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12298                 tp->nvram_jedecnum = JEDEC_ATMEL;
12299                 tg3_flag_set(tp, NVRAM_BUFFERED);
12300                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12301
12302                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12303                 tw32(NVRAM_CFG1, nvcfg1);
12304                 break;
12305         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12306         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12307         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12308         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12309                 tp->nvram_jedecnum = JEDEC_ATMEL;
12310                 tg3_flag_set(tp, NVRAM_BUFFERED);
12311                 tg3_flag_set(tp, FLASH);
12312                 tp->nvram_pagesize = 264;
12313                 break;
12314         case FLASH_5752VENDOR_ST_M45PE10:
12315         case FLASH_5752VENDOR_ST_M45PE20:
12316         case FLASH_5752VENDOR_ST_M45PE40:
12317                 tp->nvram_jedecnum = JEDEC_ST;
12318                 tg3_flag_set(tp, NVRAM_BUFFERED);
12319                 tg3_flag_set(tp, FLASH);
12320                 tp->nvram_pagesize = 256;
12321                 break;
12322         }
12323 }
12324
12325 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12326 {
12327         u32 nvcfg1, protect = 0;
12328
12329         nvcfg1 = tr32(NVRAM_CFG1);
12330
12331         /* NVRAM protection for TPM */
12332         if (nvcfg1 & (1 << 27)) {
12333                 tg3_flag_set(tp, PROTECTED_NVRAM);
12334                 protect = 1;
12335         }
12336
12337         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12338         switch (nvcfg1) {
12339         case FLASH_5761VENDOR_ATMEL_ADB021D:
12340         case FLASH_5761VENDOR_ATMEL_ADB041D:
12341         case FLASH_5761VENDOR_ATMEL_ADB081D:
12342         case FLASH_5761VENDOR_ATMEL_ADB161D:
12343         case FLASH_5761VENDOR_ATMEL_MDB021D:
12344         case FLASH_5761VENDOR_ATMEL_MDB041D:
12345         case FLASH_5761VENDOR_ATMEL_MDB081D:
12346         case FLASH_5761VENDOR_ATMEL_MDB161D:
12347                 tp->nvram_jedecnum = JEDEC_ATMEL;
12348                 tg3_flag_set(tp, NVRAM_BUFFERED);
12349                 tg3_flag_set(tp, FLASH);
12350                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12351                 tp->nvram_pagesize = 256;
12352                 break;
12353         case FLASH_5761VENDOR_ST_A_M45PE20:
12354         case FLASH_5761VENDOR_ST_A_M45PE40:
12355         case FLASH_5761VENDOR_ST_A_M45PE80:
12356         case FLASH_5761VENDOR_ST_A_M45PE16:
12357         case FLASH_5761VENDOR_ST_M_M45PE20:
12358         case FLASH_5761VENDOR_ST_M_M45PE40:
12359         case FLASH_5761VENDOR_ST_M_M45PE80:
12360         case FLASH_5761VENDOR_ST_M_M45PE16:
12361                 tp->nvram_jedecnum = JEDEC_ST;
12362                 tg3_flag_set(tp, NVRAM_BUFFERED);
12363                 tg3_flag_set(tp, FLASH);
12364                 tp->nvram_pagesize = 256;
12365                 break;
12366         }
12367
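        /* With TPM protection on, only the region below the lockout
         * address is accessible, so report that as the NVRAM size.
         */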
12368         if (protect) {
12369                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12370         } else {
12371                 switch (nvcfg1) {
12372                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12373                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12374                 case FLASH_5761VENDOR_ST_A_M45PE16:
12375                 case FLASH_5761VENDOR_ST_M_M45PE16:
12376                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12377                         break;
12378                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12379                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12380                 case FLASH_5761VENDOR_ST_A_M45PE80:
12381                 case FLASH_5761VENDOR_ST_M_M45PE80:
12382                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12383                         break;
12384                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12385                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12386                 case FLASH_5761VENDOR_ST_A_M45PE40:
12387                 case FLASH_5761VENDOR_ST_M_M45PE40:
12388                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12389                         break;
12390                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12391                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12392                 case FLASH_5761VENDOR_ST_A_M45PE20:
12393                 case FLASH_5761VENDOR_ST_M_M45PE20:
12394                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12395                         break;
12396                 }
12397         }
12398 }
12399
12400 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12401 {
12402         tp->nvram_jedecnum = JEDEC_ATMEL;
12403         tg3_flag_set(tp, NVRAM_BUFFERED);
12404         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12405 }
12406
12407 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12408 {
12409         u32 nvcfg1;
12410
12411         nvcfg1 = tr32(NVRAM_CFG1);
12412
12413         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12414         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12415         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12416                 tp->nvram_jedecnum = JEDEC_ATMEL;
12417                 tg3_flag_set(tp, NVRAM_BUFFERED);
12418                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12419
12420                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12421                 tw32(NVRAM_CFG1, nvcfg1);
12422                 return;
12423         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12424         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12425         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12426         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12427         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12428         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12429         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12430                 tp->nvram_jedecnum = JEDEC_ATMEL;
12431                 tg3_flag_set(tp, NVRAM_BUFFERED);
12432                 tg3_flag_set(tp, FLASH);
12433
12434                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12435                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12436                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12437                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12438                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12439                         break;
12440                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12441                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12442                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12443                         break;
12444                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12445                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12446                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12447                         break;
12448                 }
12449                 break;
12450         case FLASH_5752VENDOR_ST_M45PE10:
12451         case FLASH_5752VENDOR_ST_M45PE20:
12452         case FLASH_5752VENDOR_ST_M45PE40:
12453                 tp->nvram_jedecnum = JEDEC_ST;
12454                 tg3_flag_set(tp, NVRAM_BUFFERED);
12455                 tg3_flag_set(tp, FLASH);
12456
12457                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12458                 case FLASH_5752VENDOR_ST_M45PE10:
12459                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12460                         break;
12461                 case FLASH_5752VENDOR_ST_M45PE20:
12462                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12463                         break;
12464                 case FLASH_5752VENDOR_ST_M45PE40:
12465                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12466                         break;
12467                 }
12468                 break;
12469         default:
12470                 tg3_flag_set(tp, NO_NVRAM);
12471                 return;
12472         }
12473
12474         tg3_nvram_get_pagesize(tp, nvcfg1);
12475         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12476                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12477 }
12478
12480 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12481 {
12482         u32 nvcfg1;
12483
12484         nvcfg1 = tr32(NVRAM_CFG1);
12485
12486         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12487         case FLASH_5717VENDOR_ATMEL_EEPROM:
12488         case FLASH_5717VENDOR_MICRO_EEPROM:
12489                 tp->nvram_jedecnum = JEDEC_ATMEL;
12490                 tg3_flag_set(tp, NVRAM_BUFFERED);
12491                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12492
12493                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12494                 tw32(NVRAM_CFG1, nvcfg1);
12495                 return;
12496         case FLASH_5717VENDOR_ATMEL_MDB011D:
12497         case FLASH_5717VENDOR_ATMEL_ADB011B:
12498         case FLASH_5717VENDOR_ATMEL_ADB011D:
12499         case FLASH_5717VENDOR_ATMEL_MDB021D:
12500         case FLASH_5717VENDOR_ATMEL_ADB021B:
12501         case FLASH_5717VENDOR_ATMEL_ADB021D:
12502         case FLASH_5717VENDOR_ATMEL_45USPT:
12503                 tp->nvram_jedecnum = JEDEC_ATMEL;
12504                 tg3_flag_set(tp, NVRAM_BUFFERED);
12505                 tg3_flag_set(tp, FLASH);
12506
12507                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12508                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12509                         /* Detect size with tg3_get_nvram_size() */
12510                         break;
12511                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12512                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12513                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12514                         break;
12515                 default:
12516                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12517                         break;
12518                 }
12519                 break;
12520         case FLASH_5717VENDOR_ST_M_M25PE10:
12521         case FLASH_5717VENDOR_ST_A_M25PE10:
12522         case FLASH_5717VENDOR_ST_M_M45PE10:
12523         case FLASH_5717VENDOR_ST_A_M45PE10:
12524         case FLASH_5717VENDOR_ST_M_M25PE20:
12525         case FLASH_5717VENDOR_ST_A_M25PE20:
12526         case FLASH_5717VENDOR_ST_M_M45PE20:
12527         case FLASH_5717VENDOR_ST_A_M45PE20:
12528         case FLASH_5717VENDOR_ST_25USPT:
12529         case FLASH_5717VENDOR_ST_45USPT:
12530                 tp->nvram_jedecnum = JEDEC_ST;
12531                 tg3_flag_set(tp, NVRAM_BUFFERED);
12532                 tg3_flag_set(tp, FLASH);
12533
12534                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12535                 case FLASH_5717VENDOR_ST_M_M25PE20:
12536                 case FLASH_5717VENDOR_ST_M_M45PE20:
12537                         /* Detect size with tg3_get_nvram_size() */
12538                         break;
12539                 case FLASH_5717VENDOR_ST_A_M25PE20:
12540                 case FLASH_5717VENDOR_ST_A_M45PE20:
12541                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12542                         break;
12543                 default:
12544                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12545                         break;
12546                 }
12547                 break;
12548         default:
12549                 tg3_flag_set(tp, NO_NVRAM);
12550                 return;
12551         }
12552
12553         tg3_nvram_get_pagesize(tp, nvcfg1);
12554         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12555                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12556 }
12557
12558 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12559 {
12560         u32 nvcfg1, nvmpinstrp;
12561
12562         nvcfg1 = tr32(NVRAM_CFG1);
12563         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12564
12565         switch (nvmpinstrp) {
12566         case FLASH_5720_EEPROM_HD:
12567         case FLASH_5720_EEPROM_LD:
12568                 tp->nvram_jedecnum = JEDEC_ATMEL;
12569                 tg3_flag_set(tp, NVRAM_BUFFERED);
12570
12571                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12572                 tw32(NVRAM_CFG1, nvcfg1);
12573                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12574                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12575                 else
12576                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12577                 return;
12578         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12579         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12580         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12581         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12582         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12583         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12584         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12585         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12586         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12587         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12588         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12589         case FLASH_5720VENDOR_ATMEL_45USPT:
12590                 tp->nvram_jedecnum = JEDEC_ATMEL;
12591                 tg3_flag_set(tp, NVRAM_BUFFERED);
12592                 tg3_flag_set(tp, FLASH);
12593
12594                 switch (nvmpinstrp) {
12595                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12596                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12597                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12598                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12599                         break;
12600                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12601                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12602                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12603                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12604                         break;
12605                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12606                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12607                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12608                         break;
12609                 default:
12610                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12611                         break;
12612                 }
12613                 break;
12614         case FLASH_5720VENDOR_M_ST_M25PE10:
12615         case FLASH_5720VENDOR_M_ST_M45PE10:
12616         case FLASH_5720VENDOR_A_ST_M25PE10:
12617         case FLASH_5720VENDOR_A_ST_M45PE10:
12618         case FLASH_5720VENDOR_M_ST_M25PE20:
12619         case FLASH_5720VENDOR_M_ST_M45PE20:
12620         case FLASH_5720VENDOR_A_ST_M25PE20:
12621         case FLASH_5720VENDOR_A_ST_M45PE20:
12622         case FLASH_5720VENDOR_M_ST_M25PE40:
12623         case FLASH_5720VENDOR_M_ST_M45PE40:
12624         case FLASH_5720VENDOR_A_ST_M25PE40:
12625         case FLASH_5720VENDOR_A_ST_M45PE40:
12626         case FLASH_5720VENDOR_M_ST_M25PE80:
12627         case FLASH_5720VENDOR_M_ST_M45PE80:
12628         case FLASH_5720VENDOR_A_ST_M25PE80:
12629         case FLASH_5720VENDOR_A_ST_M45PE80:
12630         case FLASH_5720VENDOR_ST_25USPT:
12631         case FLASH_5720VENDOR_ST_45USPT:
12632                 tp->nvram_jedecnum = JEDEC_ST;
12633                 tg3_flag_set(tp, NVRAM_BUFFERED);
12634                 tg3_flag_set(tp, FLASH);
12635
12636                 switch (nvmpinstrp) {
12637                 case FLASH_5720VENDOR_M_ST_M25PE20:
12638                 case FLASH_5720VENDOR_M_ST_M45PE20:
12639                 case FLASH_5720VENDOR_A_ST_M25PE20:
12640                 case FLASH_5720VENDOR_A_ST_M45PE20:
12641                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12642                         break;
12643                 case FLASH_5720VENDOR_M_ST_M25PE40:
12644                 case FLASH_5720VENDOR_M_ST_M45PE40:
12645                 case FLASH_5720VENDOR_A_ST_M25PE40:
12646                 case FLASH_5720VENDOR_A_ST_M45PE40:
12647                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12648                         break;
12649                 case FLASH_5720VENDOR_M_ST_M25PE80:
12650                 case FLASH_5720VENDOR_M_ST_M45PE80:
12651                 case FLASH_5720VENDOR_A_ST_M25PE80:
12652                 case FLASH_5720VENDOR_A_ST_M45PE80:
12653                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12654                         break;
12655                 default:
12656                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12657                         break;
12658                 }
12659                 break;
12660         default:
12661                 tg3_flag_set(tp, NO_NVRAM);
12662                 return;
12663         }
12664
12665         tg3_nvram_get_pagesize(tp, nvcfg1);
12666         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12667                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12668 }
12669
12670 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12671 static void __devinit tg3_nvram_init(struct tg3 *tp)
12672 {
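        /* Reset the EEPROM access state machine and program the default
         * serial clock period before touching the interface.
         */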
12673         tw32_f(GRC_EEPROM_ADDR,
12674              (EEPROM_ADDR_FSM_RESET |
12675               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12676                EEPROM_ADDR_CLKPERD_SHIFT)));
12677
12678         msleep(1);
12679
12680         /* Enable seeprom accesses. */
12681         tw32_f(GRC_LOCAL_CTRL,
12682              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12683         udelay(100);
12684
12685         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12686             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12687                 tg3_flag_set(tp, NVRAM);
12688
12689                 if (tg3_nvram_lock(tp)) {
12690                         netdev_warn(tp->dev,
12691                                     "Cannot get nvram lock, %s failed\n",
12692                                     __func__);
12693                         return;
12694                 }
12695                 tg3_enable_nvram_access(tp);
12696
12697                 tp->nvram_size = 0;
12698
12699                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12700                         tg3_get_5752_nvram_info(tp);
12701                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12702                         tg3_get_5755_nvram_info(tp);
12703                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12704                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12705                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12706                         tg3_get_5787_nvram_info(tp);
12707                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12708                         tg3_get_5761_nvram_info(tp);
12709                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12710                         tg3_get_5906_nvram_info(tp);
12711                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12712                          tg3_flag(tp, 57765_CLASS))
12713                         tg3_get_57780_nvram_info(tp);
12714                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12715                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12716                         tg3_get_5717_nvram_info(tp);
12717                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12718                         tg3_get_5720_nvram_info(tp);
12719                 else
12720                         tg3_get_nvram_info(tp);
12721
12722                 if (tp->nvram_size == 0)
12723                         tg3_get_nvram_size(tp);
12724
12725                 tg3_disable_nvram_access(tp);
12726                 tg3_nvram_unlock(tp);
12727
12728         } else {
12729                 tg3_flag_clear(tp, NVRAM);
12730                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12731
12732                 tg3_get_eeprom_size(tp);
12733         }
12734 }
12735
12736 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12737                                     u32 offset, u32 len, u8 *buf)
12738 {
12739         int i, j, rc = 0;
12740         u32 val;
12741
12742         for (i = 0; i < len; i += 4) {
12743                 u32 addr;
12744                 __be32 data;
12745
12746                 addr = offset + i;
12747
12748                 memcpy(&data, buf + i, 4);
12749
12750                 /*
12751                  * The SEEPROM interface expects the data to always be opposite
12752                  * the native endian format.  We accomplish this by reversing
12753                  * all the operations that would have been performed on the
12754                  * data from a call to tg3_nvram_read_be32().
12755                  */
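                /* Example (illustrative bytes): buf bytes aa bb cc dd yield
                 * 0xaabbccdd from be32_to_cpu(), so swab32() hands 0xddccbbaa
                 * to GRC_EEPROM_DATA on any host.
                 */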
12756                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12757
12758                 val = tr32(GRC_EEPROM_ADDR);
12759                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12760
12761                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12762                         EEPROM_ADDR_READ);
12763                 tw32(GRC_EEPROM_ADDR, val |
12764                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12765                         (addr & EEPROM_ADDR_ADDR_MASK) |
12766                         EEPROM_ADDR_START |
12767                         EEPROM_ADDR_WRITE);
12768
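                /* Poll for completion; 1000 iterations of msleep(1) bound
                 * the wait at roughly one second.
                 */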
12769                 for (j = 0; j < 1000; j++) {
12770                         val = tr32(GRC_EEPROM_ADDR);
12771
12772                         if (val & EEPROM_ADDR_COMPLETE)
12773                                 break;
12774                         msleep(1);
12775                 }
12776                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12777                         rc = -EBUSY;
12778                         break;
12779                 }
12780         }
12781
12782         return rc;
12783 }
12784
12785 /* offset and length are dword aligned */
12786 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12787                 u8 *buf)
12788 {
12789         int ret = 0;
12790         u32 pagesize = tp->nvram_pagesize;
12791         u32 pagemask = pagesize - 1;
12792         u32 nvram_cmd;
12793         u8 *tmp;
12794
12795         tmp = kmalloc(pagesize, GFP_KERNEL);
12796         if (tmp == NULL)
12797                 return -ENOMEM;
12798
12799         while (len) {
12800                 int j;
12801                 u32 phy_addr, page_off, size;
12802
12803                 phy_addr = offset & ~pagemask;
12804
12805                 for (j = 0; j < pagesize; j += 4) {
12806                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12807                                                   (__be32 *) (tmp + j));
12808                         if (ret)
12809                                 break;
12810                 }
12811                 if (ret)
12812                         break;
12813
12814                 page_off = offset & pagemask;
12815                 size = pagesize;
12816                 if (len < size)
12817                         size = len;
12818
12819                 len -= size;
12820
12821                 memcpy(tmp + page_off, buf, size);
12822
12823                 offset = offset + (pagesize - page_off);
12824
12825                 tg3_enable_nvram_access(tp);
12826
12827                 /*
12828                  * Before we can erase the flash page, we need
12829                  * to issue a special "write enable" command.
12830                  */
12831                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12832
12833                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12834                         break;
12835
12836                 /* Erase the target page */
12837                 tw32(NVRAM_ADDR, phy_addr);
12838
12839                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12840                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12841
12842                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12843                         break;
12844
12845                 /* Issue another write enable to start the write. */
12846                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12847
12848                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12849                         break;
12850
12851                 for (j = 0; j < pagesize; j += 4) {
12852                         __be32 data;
12853
12854                         data = *((__be32 *) (tmp + j));
12855
12856                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12857
12858                         tw32(NVRAM_ADDR, phy_addr + j);
12859
12860                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12861                                 NVRAM_CMD_WR;
12862
12863                         if (j == 0)
12864                                 nvram_cmd |= NVRAM_CMD_FIRST;
12865                         else if (j == (pagesize - 4))
12866                                 nvram_cmd |= NVRAM_CMD_LAST;
12867
12868                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12869                                 break;
12870                 }
12871                 if (ret)
12872                         break;
12873         }
12874
12875         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12876         tg3_nvram_exec_cmd(tp, nvram_cmd);
12877
12878         kfree(tmp);
12879
12880         return ret;
12881 }
12882
12883 /* offset and length are dword aligned */
12884 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12885                 u8 *buf)
12886 {
12887         int i, ret = 0;
12888
12889         for (i = 0; i < len; i += 4, offset += 4) {
12890                 u32 page_off, phy_addr, nvram_cmd;
12891                 __be32 data;
12892
12893                 memcpy(&data, buf + i, 4);
12894                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12895
12896                 page_off = offset % tp->nvram_pagesize;
12897
12898                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12899
12900                 tw32(NVRAM_ADDR, phy_addr);
12901
12902                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12903
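                /* Frame the burst: FIRST on the first dword of a page or of
                 * the transfer, LAST on the final dword of a page (e.g.
                 * page_off 260 with a 264-byte page) or of the transfer.
                 */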
12904                 if (page_off == 0 || i == 0)
12905                         nvram_cmd |= NVRAM_CMD_FIRST;
12906                 if (page_off == (tp->nvram_pagesize - 4))
12907                         nvram_cmd |= NVRAM_CMD_LAST;
12908
12909                 if (i == (len - 4))
12910                         nvram_cmd |= NVRAM_CMD_LAST;
12911
12912                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12913                     !tg3_flag(tp, 5755_PLUS) &&
12914                     (tp->nvram_jedecnum == JEDEC_ST) &&
12915                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12916
12917                         if ((ret = tg3_nvram_exec_cmd(tp,
12918                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12919                                 NVRAM_CMD_DONE)))
12921                                 break;
12922                 }
12923                 if (!tg3_flag(tp, FLASH)) {
12924                         /* We always do complete word writes to eeprom. */
12925                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12926                 }
12927
12928                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12929                         break;
12930         }
12931         return ret;
12932 }
12933
12934 /* offset and length are dword aligned */
12935 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12936 {
12937         int ret;
12938
12939         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12940                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12941                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12942                 udelay(40);
12943         }
12944
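        /* Parts without the NVRAM engine (5700/5701) go through the GRC
         * EEPROM interface; everything else takes the buffered or
         * unbuffered NVRAM path depending on the part.
         */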
12945         if (!tg3_flag(tp, NVRAM)) {
12946                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12947         } else {
12948                 u32 grc_mode;
12949
12950                 ret = tg3_nvram_lock(tp);
12951                 if (ret)
12952                         return ret;
12953
12954                 tg3_enable_nvram_access(tp);
12955                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12956                         tw32(NVRAM_WRITE1, 0x406);
12957
12958                 grc_mode = tr32(GRC_MODE);
12959                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12960
12961                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12962                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12963                                 buf);
12964                 } else {
12965                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12966                                 buf);
12967                 }
12968
12969                 grc_mode = tr32(GRC_MODE);
12970                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12971
12972                 tg3_disable_nvram_access(tp);
12973                 tg3_nvram_unlock(tp);
12974         }
12975
12976         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12977                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12978                 udelay(40);
12979         }
12980
12981         return ret;
12982 }
12983
12984 struct subsys_tbl_ent {
12985         u16 subsys_vendor, subsys_devid;
12986         u32 phy_id;
12987 };
12988
12989 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12990         /* Broadcom boards. */
12991         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12992           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12993         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12994           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12995         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12996           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12997         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12998           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12999         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13000           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13001         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13002           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13003         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13004           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13005         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13006           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13007         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13008           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13009         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13010           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13011         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13012           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13013
13014         /* 3com boards. */
13015         { TG3PCI_SUBVENDOR_ID_3COM,
13016           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13017         { TG3PCI_SUBVENDOR_ID_3COM,
13018           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13019         { TG3PCI_SUBVENDOR_ID_3COM,
13020           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13021         { TG3PCI_SUBVENDOR_ID_3COM,
13022           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13023         { TG3PCI_SUBVENDOR_ID_3COM,
13024           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13025
13026         /* DELL boards. */
13027         { TG3PCI_SUBVENDOR_ID_DELL,
13028           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13029         { TG3PCI_SUBVENDOR_ID_DELL,
13030           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13031         { TG3PCI_SUBVENDOR_ID_DELL,
13032           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13033         { TG3PCI_SUBVENDOR_ID_DELL,
13034           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13035
13036         /* Compaq boards. */
13037         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13038           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13039         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13040           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13041         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13042           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13043         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13044           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13045         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13046           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13047
13048         /* IBM boards. */
13049         { TG3PCI_SUBVENDOR_ID_IBM,
13050           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13051 };
13052
13053 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13054 {
13055         int i;
13056
13057         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13058                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13059                      tp->pdev->subsystem_vendor) &&
13060                     (subsys_id_to_phy_id[i].subsys_devid ==
13061                      tp->pdev->subsystem_device))
13062                         return &subsys_id_to_phy_id[i];
13063         }
13064         return NULL;
13065 }
13066
13067 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13068 {
13069         u32 val;
13070
13071         tp->phy_id = TG3_PHY_ID_INVALID;
13072         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13073
13074         /* Assume an onboard device and WOL capable by default.  */
13075         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13076         tg3_flag_set(tp, WOL_CAP);
13077
13078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13079                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13080                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13081                         tg3_flag_set(tp, IS_NIC);
13082                 }
13083                 val = tr32(VCPU_CFGSHDW);
13084                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13085                         tg3_flag_set(tp, ASPM_WORKAROUND);
13086                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13087                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13088                         tg3_flag_set(tp, WOL_ENABLE);
13089                         device_set_wakeup_enable(&tp->pdev->dev, true);
13090                 }
13091                 goto done;
13092         }
13093
13094         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13095         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13096                 u32 nic_cfg, led_cfg;
13097                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13098                 int eeprom_phy_serdes = 0;
13099
13100                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13101                 tp->nic_sram_data_cfg = nic_cfg;
13102
13103                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13104                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13105                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13106                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13107                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13108                     (ver > 0) && (ver < 0x100))
13109                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13110
13111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13112                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13113
13114                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13115                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13116                         eeprom_phy_serdes = 1;
13117
13118                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13119                 if (nic_phy_id != 0) {
13120                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13121                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13122
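                        /* Repack the two SRAM words into the same phy_id
                         * layout that tg3_phy_probe() builds from
                         * MII_PHYSID1/2.
                         */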
13123                         eeprom_phy_id  = (id1 >> 16) << 10;
13124                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13125                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13126                 } else
13127                         eeprom_phy_id = 0;
13128
13129                 tp->phy_id = eeprom_phy_id;
13130                 if (eeprom_phy_serdes) {
13131                         if (!tg3_flag(tp, 5705_PLUS))
13132                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13133                         else
13134                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13135                 }
13136
13137                 if (tg3_flag(tp, 5750_PLUS))
13138                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13139                                     SHASTA_EXT_LED_MODE_MASK);
13140                 else
13141                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13142
13143                 switch (led_cfg) {
13144                 default:
13145                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13146                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13147                         break;
13148
13149                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13150                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13151                         break;
13152
13153                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13154                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13155
13156                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13157                          * read on some older 5700/5701 bootcode.
13158                          */
13159                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13160                             ASIC_REV_5700 ||
13161                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13162                             ASIC_REV_5701)
13163                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13164
13165                         break;
13166
13167                 case SHASTA_EXT_LED_SHARED:
13168                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13169                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13170                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13171                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13172                                                  LED_CTRL_MODE_PHY_2);
13173                         break;
13174
13175                 case SHASTA_EXT_LED_MAC:
13176                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13177                         break;
13178
13179                 case SHASTA_EXT_LED_COMBO:
13180                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13181                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13182                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13183                                                  LED_CTRL_MODE_PHY_2);
13184                         break;
13185
13186                 }
13187
13188                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13189                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13190                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13191                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13192
13193                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13194                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13195
13196                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13197                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13198                         if ((tp->pdev->subsystem_vendor ==
13199                              PCI_VENDOR_ID_ARIMA) &&
13200                             (tp->pdev->subsystem_device == 0x205a ||
13201                              tp->pdev->subsystem_device == 0x2063))
13202                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13203                 } else {
13204                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13205                         tg3_flag_set(tp, IS_NIC);
13206                 }
13207
13208                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13209                         tg3_flag_set(tp, ENABLE_ASF);
13210                         if (tg3_flag(tp, 5750_PLUS))
13211                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13212                 }
13213
13214                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13215                     tg3_flag(tp, 5750_PLUS))
13216                         tg3_flag_set(tp, ENABLE_APE);
13217
13218                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13219                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13220                         tg3_flag_clear(tp, WOL_CAP);
13221
13222                 if (tg3_flag(tp, WOL_CAP) &&
13223                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13224                         tg3_flag_set(tp, WOL_ENABLE);
13225                         device_set_wakeup_enable(&tp->pdev->dev, true);
13226                 }
13227
13228                 if (cfg2 & (1 << 17))
13229                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13230
13231                 /* serdes signal pre-emphasis in register 0x590 set by
13232                  * bootcode if bit 18 is set */
13233                 if (cfg2 & (1 << 18))
13234                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13235
13236                 if ((tg3_flag(tp, 57765_PLUS) ||
13237                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13238                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13239                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13240                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13241
13242                 if (tg3_flag(tp, PCI_EXPRESS) &&
13243                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13244                     !tg3_flag(tp, 57765_PLUS)) {
13245                         u32 cfg3;
13246
13247                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13248                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13249                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13250                 }
13251
13252                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13253                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13254                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13255                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13256                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13257                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13258         }
13259 done:
13260         if (tg3_flag(tp, WOL_CAP))
13261                 device_set_wakeup_enable(&tp->pdev->dev,
13262                                          tg3_flag(tp, WOL_ENABLE));
13263         else
13264                 device_set_wakeup_capable(&tp->pdev->dev, false);
13265 }
13266
13267 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13268 {
13269         int i;
13270         u32 val;
13271
13272         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13273         tw32(OTP_CTRL, cmd);
13274
13275         /* Wait for up to 1 ms for command to execute. */
13276         for (i = 0; i < 100; i++) {
13277                 val = tr32(OTP_STATUS);
13278                 if (val & OTP_STATUS_CMD_DONE)
13279                         break;
13280                 udelay(10);
13281         }
13282
13283         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13284 }
13285
13286 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13287  * configuration is a 32-bit value that straddles the alignment boundary.
13288  * We do two 32-bit reads and then shift and merge the results.
13289  */
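/* Merge example (illustrative values): thalf_otp = 0x11112222 and
 * bhalf_otp = 0x33334444 combine to 0x22223333.
 */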
13290 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13291 {
13292         u32 bhalf_otp, thalf_otp;
13293
13294         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13295
13296         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13297                 return 0;
13298
13299         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13300
13301         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13302                 return 0;
13303
13304         thalf_otp = tr32(OTP_READ_DATA);
13305
13306         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13307
13308         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13309                 return 0;
13310
13311         bhalf_otp = tr32(OTP_READ_DATA);
13312
13313         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13314 }
13315
13316 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13317 {
13318         u32 adv = ADVERTISED_Autoneg;
13319
13320         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13321                 adv |= ADVERTISED_1000baseT_Half |
13322                        ADVERTISED_1000baseT_Full;
13323
13324         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13325                 adv |= ADVERTISED_100baseT_Half |
13326                        ADVERTISED_100baseT_Full |
13327                        ADVERTISED_10baseT_Half |
13328                        ADVERTISED_10baseT_Full |
13329                        ADVERTISED_TP;
13330         else
13331                 adv |= ADVERTISED_FIBRE;
13332
13333         tp->link_config.advertising = adv;
13334         tp->link_config.speed = SPEED_INVALID;
13335         tp->link_config.duplex = DUPLEX_INVALID;
13336         tp->link_config.autoneg = AUTONEG_ENABLE;
13337         tp->link_config.active_speed = SPEED_INVALID;
13338         tp->link_config.active_duplex = DUPLEX_INVALID;
13339         tp->link_config.orig_speed = SPEED_INVALID;
13340         tp->link_config.orig_duplex = DUPLEX_INVALID;
13341         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13342 }
13343
13344 static int __devinit tg3_phy_probe(struct tg3 *tp)
13345 {
13346         u32 hw_phy_id_1, hw_phy_id_2;
13347         u32 hw_phy_id, hw_phy_id_masked;
13348         int err;
13349
13350         /* flow control autonegotiation is the default behavior */
13351         tg3_flag_set(tp, PAUSE_AUTONEG);
13352         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13353
13354         if (tg3_flag(tp, USE_PHYLIB))
13355                 return tg3_phy_init(tp);
13356
13357         /* Reading the PHY ID register can conflict with ASF
13358          * firmware access to the PHY hardware.
13359          */
13360         err = 0;
13361         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13362                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13363         } else {
13364                 /* Now read the physical PHY_ID from the chip and verify
13365                  * that it is sane.  If it doesn't look good, we fall back
13366                  * to the PHY_ID found in the eeprom area and, failing
13367                  * that, the hard-coded subsys-device table.
13368                  */
13369                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13370                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13371
13372                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13373                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13374                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13375
13376                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13377         }
13378
13379         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13380                 tp->phy_id = hw_phy_id;
13381                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13382                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13383                 else
13384                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13385         } else {
13386                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13387                         /* Do nothing, phy ID already set up in
13388                          * tg3_get_eeprom_hw_cfg().
13389                          */
13390                 } else {
13391                         struct subsys_tbl_ent *p;
13392
13393                         /* No eeprom signature?  Try the hardcoded
13394                          * subsys device table.
13395                          */
13396                         p = tg3_lookup_by_subsys(tp);
13397                         if (!p)
13398                                 return -ENODEV;
13399
13400                         tp->phy_id = p->phy_id;
13401                         if (!tp->phy_id ||
13402                             tp->phy_id == TG3_PHY_ID_BCM8002)
13403                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13404                 }
13405         }
13406
13407         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13408             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13409              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13410              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13411               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13412              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13413               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13414                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13415
13416         tg3_phy_init_link_config(tp);
13417
13418         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13419             !tg3_flag(tp, ENABLE_APE) &&
13420             !tg3_flag(tp, ENABLE_ASF)) {
13421                 u32 bmsr, dummy;
13422
13423                 tg3_readphy(tp, MII_BMSR, &bmsr);
13424                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13425                     (bmsr & BMSR_LSTATUS))
13426                         goto skip_phy_reset;
13427
13428                 err = tg3_phy_reset(tp);
13429                 if (err)
13430                         return err;
13431
13432                 tg3_phy_set_wirespeed(tp);
13433
13434                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13435                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13436                                             tp->link_config.flowctrl);
13437
13438                         tg3_writephy(tp, MII_BMCR,
13439                                      BMCR_ANENABLE | BMCR_ANRESTART);
13440                 }
13441         }
13442
13443 skip_phy_reset:
13444         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13445                 err = tg3_init_5401phy_dsp(tp);
13446                 if (err)
13447                         return err;
13448
13449                 err = tg3_init_5401phy_dsp(tp);
13450         }
13451
13452         return err;
13453 }
13454
13455 static void __devinit tg3_read_vpd(struct tg3 *tp)
13456 {
13457         u8 *vpd_data;
13458         unsigned int block_end, rosize, len;
13459         u32 vpdlen;
13460         int j, i = 0;
13461
13462         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13463         if (!vpd_data)
13464                 goto out_no_vpd;
13465
13466         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13467         if (i < 0)
13468                 goto out_not_found;
13469
13470         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13471         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13472         i += PCI_VPD_LRDT_TAG_SIZE;
13473
13474         if (block_end > vpdlen)
13475                 goto out_not_found;
13476
13477         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13478                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13479         if (j > 0) {
13480                 len = pci_vpd_info_field_size(&vpd_data[j]);
13481
13482                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13483                 if (j + len > block_end || len != 4 ||
13484                     memcmp(&vpd_data[j], "1028", 4))
13485                         goto partno;
13486
13487                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13488                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13489                 if (j < 0)
13490                         goto partno;
13491
13492                 len = pci_vpd_info_field_size(&vpd_data[j]);
13493
13494                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13495                 if (j + len > block_end)
13496                         goto partno;
13497
13498                 memcpy(tp->fw_ver, &vpd_data[j], len);
13499                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13500         }
13501
13502 partno:
13503         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13504                                       PCI_VPD_RO_KEYWORD_PARTNO);
13505         if (i < 0)
13506                 goto out_not_found;
13507
13508         len = pci_vpd_info_field_size(&vpd_data[i]);
13509
13510         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13511         if (len > TG3_BPN_SIZE ||
13512             (len + i) > vpdlen)
13513                 goto out_not_found;
13514
13515         memcpy(tp->board_part_number, &vpd_data[i], len);
13516
13517 out_not_found:
13518         kfree(vpd_data);
13519         if (tp->board_part_number[0])
13520                 return;
13521
13522 out_no_vpd:
13523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13524                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13525                         strcpy(tp->board_part_number, "BCM5717");
13526                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13527                         strcpy(tp->board_part_number, "BCM5718");
13528                 else
13529                         goto nomatch;
13530         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13531                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13532                         strcpy(tp->board_part_number, "BCM57780");
13533                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13534                         strcpy(tp->board_part_number, "BCM57760");
13535                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13536                         strcpy(tp->board_part_number, "BCM57790");
13537                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13538                         strcpy(tp->board_part_number, "BCM57788");
13539                 else
13540                         goto nomatch;
13541         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13542                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13543                         strcpy(tp->board_part_number, "BCM57761");
13544                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13545                         strcpy(tp->board_part_number, "BCM57765");
13546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13547                         strcpy(tp->board_part_number, "BCM57781");
13548                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13549                         strcpy(tp->board_part_number, "BCM57785");
13550                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13551                         strcpy(tp->board_part_number, "BCM57791");
13552                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13553                         strcpy(tp->board_part_number, "BCM57795");
13554                 else
13555                         goto nomatch;
13556         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13557                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13558                         strcpy(tp->board_part_number, "BCM57762");
13559                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13560                         strcpy(tp->board_part_number, "BCM57766");
13561                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13562                         strcpy(tp->board_part_number, "BCM57782");
13563                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13564                         strcpy(tp->board_part_number, "BCM57786");
13565                 else
13566                         goto nomatch;
13567         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13568                 strcpy(tp->board_part_number, "BCM95906");
13569         } else {
13570 nomatch:
13571                 strcpy(tp->board_part_number, "none");
13572         }
13573 }
13574
13575 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13576 {
13577         u32 val;
13578
13579         if (tg3_nvram_read(tp, offset, &val) ||
13580             (val & 0xfc000000) != 0x0c000000 ||
13581             tg3_nvram_read(tp, offset + 4, &val) ||
13582             val != 0)
13583                 return 0;
13584
13585         return 1;
13586 }
13587
13588 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13589 {
13590         u32 val, offset, start, ver_offset;
13591         int i, dst_off;
13592         bool newver = false;
13593
13594         if (tg3_nvram_read(tp, 0xc, &offset) ||
13595             tg3_nvram_read(tp, 0x4, &start))
13596                 return;
13597
13598         offset = tg3_nvram_logical_addr(tp, offset);
13599
13600         if (tg3_nvram_read(tp, offset, &val))
13601                 return;
13602
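        /* Same signature test as tg3_fw_img_is_valid(): a first word
         * matching 0x0c000000 in its top six bits plus a zero second word
         * marks the newer image format.
         */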
13603         if ((val & 0xfc000000) == 0x0c000000) {
13604                 if (tg3_nvram_read(tp, offset + 4, &val))
13605                         return;
13606
13607                 if (val == 0)
13608                         newver = true;
13609         }
13610
13611         dst_off = strlen(tp->fw_ver);
13612
13613         if (newver) {
13614                 if (TG3_VER_SIZE - dst_off < 16 ||
13615                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13616                         return;
13617
13618                 offset = offset + ver_offset - start;
13619                 for (i = 0; i < 16; i += 4) {
13620                         __be32 v;
13621                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13622                                 return;
13623
13624                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13625                 }
13626         } else {
13627                 u32 major, minor;
13628
13629                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13630                         return;
13631
13632                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13633                         TG3_NVM_BCVER_MAJSFT;
13634                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13635                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13636                          "v%d.%02d", major, minor);
13637         }
13638 }
13639
13640 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13641 {
13642         u32 val, major, minor;
13643
13644         /* Use native endian representation */
13645         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13646                 return;
13647
13648         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13649                 TG3_NVM_HWSB_CFG1_MAJSFT;
13650         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13651                 TG3_NVM_HWSB_CFG1_MINSFT;
13652
13653         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13654 }
13655
13656 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13657 {
13658         u32 offset, major, minor, build;
13659
13660         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13661
13662         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13663                 return;
13664
13665         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13666         case TG3_EEPROM_SB_REVISION_0:
13667                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13668                 break;
13669         case TG3_EEPROM_SB_REVISION_2:
13670                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13671                 break;
13672         case TG3_EEPROM_SB_REVISION_3:
13673                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13674                 break;
13675         case TG3_EEPROM_SB_REVISION_4:
13676                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13677                 break;
13678         case TG3_EEPROM_SB_REVISION_5:
13679                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13680                 break;
13681         case TG3_EEPROM_SB_REVISION_6:
13682                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13683                 break;
13684         default:
13685                 return;
13686         }
13687
13688         if (tg3_nvram_read(tp, offset, &val))
13689                 return;
13690
13691         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13692                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13693         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13694                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13695         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13696
13697         if (minor > 99 || build > 26)
13698                 return;
13699
13700         offset = strlen(tp->fw_ver);
13701         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13702                  " v%d.%02d", major, minor);
13703
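        /* Append the build as a letter suffix; with (illustrative) major 1,
         * minor 5, build 2 the string reads "sb v1.05b".
         */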
13704         if (build > 0) {
13705                 offset = strlen(tp->fw_ver);
13706                 if (offset < TG3_VER_SIZE - 1)
13707                         tp->fw_ver[offset] = 'a' + build - 1;
13708         }
13709 }
13710
13711 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13712 {
13713         u32 val, offset, start;
13714         int i, vlen;
13715
13716         for (offset = TG3_NVM_DIR_START;
13717              offset < TG3_NVM_DIR_END;
13718              offset += TG3_NVM_DIRENT_SIZE) {
13719                 if (tg3_nvram_read(tp, offset, &val))
13720                         return;
13721
13722                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13723                         break;
13724         }
13725
13726         if (offset == TG3_NVM_DIR_END)
13727                 return;
13728
13729         if (!tg3_flag(tp, 5705_PLUS))
13730                 start = 0x08000000;
13731         else if (tg3_nvram_read(tp, offset - 4, &start))
13732                 return;
13733
13734         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13735             !tg3_fw_img_is_valid(tp, offset) ||
13736             tg3_nvram_read(tp, offset + 8, &val))
13737                 return;
13738
13739         offset += val - start;
13740
13741         vlen = strlen(tp->fw_ver);
13742
13743         tp->fw_ver[vlen++] = ',';
13744         tp->fw_ver[vlen++] = ' ';
13745
13746         for (i = 0; i < 4; i++) {
13747                 __be32 v;
13748                 if (tg3_nvram_read_be32(tp, offset, &v))
13749                         return;
13750
13751                 offset += sizeof(v);
13752
13753                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13754                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13755                         break;
13756                 }
13757
13758                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13759                 vlen += sizeof(v);
13760         }
13761 }
13762
13763 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13764 {
13765         int vlen;
13766         u32 apedata;
13767         char *fwtype;
13768
13769         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13770                 return;
13771
13772         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13773         if (apedata != APE_SEG_SIG_MAGIC)
13774                 return;
13775
13776         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13777         if (!(apedata & APE_FW_STATUS_READY))
13778                 return;
13779
13780         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13781
13782         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13783                 tg3_flag_set(tp, APE_HAS_NCSI);
13784                 fwtype = "NCSI";
13785         } else {
13786                 fwtype = "DASH";
13787         }
13788
13789         vlen = strlen(tp->fw_ver);
13790
13791         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13792                  fwtype,
13793                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13794                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13795                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13796                  (apedata & APE_FW_VERSION_BLDMSK));
13797 }
13798
13799 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13800 {
13801         u32 val;
13802         bool vpd_vers = false;
13803
13804         if (tp->fw_ver[0] != 0)
13805                 vpd_vers = true;
13806
13807         if (tg3_flag(tp, NO_NVRAM)) {
13808                 strcat(tp->fw_ver, "sb");
13809                 return;
13810         }
13811
13812         if (tg3_nvram_read(tp, 0, &val))
13813                 return;
13814
13815         if (val == TG3_EEPROM_MAGIC)
13816                 tg3_read_bc_ver(tp);
13817         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13818                 tg3_read_sb_ver(tp, val);
13819         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13820                 tg3_read_hwsb_ver(tp);
13821         else
13822                 return;
13823
13824         if (vpd_vers)
13825                 goto done;
13826
13827         if (tg3_flag(tp, ENABLE_APE)) {
13828                 if (tg3_flag(tp, ENABLE_ASF))
13829                         tg3_read_dash_ver(tp);
13830         } else if (tg3_flag(tp, ENABLE_ASF)) {
13831                 tg3_read_mgmtfw_ver(tp);
13832         }
13833
13834 done:
13835         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13836 }
13837
13838 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13839
13840 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13841 {
13842         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13843                 return TG3_RX_RET_MAX_SIZE_5717;
13844         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13845                 return TG3_RX_RET_MAX_SIZE_5700;
13846         else
13847                 return TG3_RX_RET_MAX_SIZE_5705;
13848 }
13849
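      /* Host bridges known to reorder or post register writes; consulted in
       * tg3_get_invariants() below to decide whether mailbox writes must be
       * flushed with a read-back.
       */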
13850 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13851         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13852         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13853         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13854         { },
13855 };
13856
13857 static int __devinit tg3_get_invariants(struct tg3 *tp)
13858 {
13859         u32 misc_ctrl_reg;
13860         u32 pci_state_reg, grc_misc_cfg;
13861         u32 val;
13862         u16 pci_cmd;
13863         int err;
13864
13865         /* Force memory write invalidate off.  If we leave it on,
13866          * then on 5700_BX chips we have to enable a workaround.
13867          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13868          * to match the cacheline size.  The Broadcom driver has this
13869          * workaround but turns MWI off all the time, so it never uses
13870          * it.  This seems to suggest that the workaround is insufficient.
13871          */
13872         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13873         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13874         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13875
13876         /* Important! -- Make sure register accesses are byteswapped
13877          * correctly.  Also, for those chips that require it, make
13878          * sure that indirect register accesses are enabled before
13879          * the first operation.
13880          */
13881         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13882                               &misc_ctrl_reg);
13883         tp->misc_host_ctrl |= (misc_ctrl_reg &
13884                                MISC_HOST_CTRL_CHIPREV);
13885         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13886                                tp->misc_host_ctrl);
13887
13888         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13889                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13890         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13891                 u32 prod_id_asic_rev;
13892
13893                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13894                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13895                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13896                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13897                         pci_read_config_dword(tp->pdev,
13898                                               TG3PCI_GEN2_PRODID_ASICREV,
13899                                               &prod_id_asic_rev);
13900                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13901                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13902                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13903                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13904                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13905                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13906                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13907                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13908                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13909                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13910                         pci_read_config_dword(tp->pdev,
13911                                               TG3PCI_GEN15_PRODID_ASICREV,
13912                                               &prod_id_asic_rev);
13913                 else
13914                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13915                                               &prod_id_asic_rev);
13916
13917                 tp->pci_chip_rev_id = prod_id_asic_rev;
13918         }
13919
13920         /* Wrong chip ID in 5752 A0. This code can be removed later
13921          * as A0 is not in production.
13922          */
13923         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13924                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13925
13926         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13927          * we need to disable memory and use config. cycles
13928          * only to access all registers. The 5702/03 chips
13929          * can mistakenly decode the special cycles from the
13930          * ICH chipsets as memory write cycles, causing corruption
13931          * of register and memory space. Only certain ICH bridges
13932          * will drive special cycles with non-zero data during the
13933          * address phase which can fall within the 5703's address
13934          * range. This is not an ICH bug as the PCI spec allows
13935          * non-zero address during special cycles. However, only
13936          * these ICH bridges are known to drive non-zero addresses
13937          * during special cycles.
13938          *
13939          * Since special cycles do not cross PCI bridges, we only
13940          * enable this workaround if the 5703 is on the secondary
13941          * bus of these ICH bridges.
13942          */
13943         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13944             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13945                 static struct tg3_dev_id {
13946                         u32     vendor;
13947                         u32     device;
13948                         u32     rev;
13949                 } ich_chipsets[] = {
13950                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13951                           PCI_ANY_ID },
13952                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13953                           PCI_ANY_ID },
13954                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13955                           0xa },
13956                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13957                           PCI_ANY_ID },
13958                         { },
13959                 };
13960                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13961                 struct pci_dev *bridge = NULL;
13962
13963                 while (pci_id->vendor != 0) {
13964                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13965                                                 bridge);
13966                         if (!bridge) {
13967                                 pci_id++;
13968                                 continue;
13969                         }
13970                         if (pci_id->rev != PCI_ANY_ID) {
13971                                 if (bridge->revision > pci_id->rev)
13972                                         continue;
13973                         }
13974                         if (bridge->subordinate &&
13975                             (bridge->subordinate->number ==
13976                              tp->pdev->bus->number)) {
13977                                 tg3_flag_set(tp, ICH_WORKAROUND);
13978                                 pci_dev_put(bridge);
13979                                 break;
13980                         }
13981                 }
13982         }
13983
13984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13985                 static struct tg3_dev_id {
13986                         u32     vendor;
13987                         u32     device;
13988                 } bridge_chipsets[] = {
13989                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13990                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13991                         { },
13992                 };
13993                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13994                 struct pci_dev *bridge = NULL;
13995
13996                 while (pci_id->vendor != 0) {
13997                         bridge = pci_get_device(pci_id->vendor,
13998                                                 pci_id->device,
13999                                                 bridge);
14000                         if (!bridge) {
14001                                 pci_id++;
14002                                 continue;
14003                         }
14004                         if (bridge->subordinate &&
14005                             (bridge->subordinate->number <=
14006                              tp->pdev->bus->number) &&
14007                             (bridge->subordinate->subordinate >=
14008                              tp->pdev->bus->number)) {
14009                                 tg3_flag_set(tp, 5701_DMA_BUG);
14010                                 pci_dev_put(bridge);
14011                                 break;
14012                         }
14013                 }
14014         }
14015
14016         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14017          * DMA addresses > 40-bit.  This bridge may have additional
14018          * 57xx devices behind it in some 4-port NIC designs, for example.
14019          * Any tg3 device found behind the bridge will also need the 40-bit
14020          * DMA workaround.
14021          */
14022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14024                 tg3_flag_set(tp, 5780_CLASS);
14025                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14026                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14027         } else {
14028                 struct pci_dev *bridge = NULL;
14029
14030                 do {
14031                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14032                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14033                                                 bridge);
14034                         if (bridge && bridge->subordinate &&
14035                             (bridge->subordinate->number <=
14036                              tp->pdev->bus->number) &&
14037                             (bridge->subordinate->subordinate >=
14038                              tp->pdev->bus->number)) {
14039                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14040                                 pci_dev_put(bridge);
14041                                 break;
14042                         }
14043                 } while (bridge);
14044         }
14045
14046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14048                 tp->pdev_peer = tg3_find_peer(tp);
14049
14050         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14053                 tg3_flag_set(tp, 5717_PLUS);
14054
14055         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14056             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14057                 tg3_flag_set(tp, 57765_CLASS);
14058
14059         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14060                 tg3_flag_set(tp, 57765_PLUS);
14061
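              /* Note the chip-class hierarchy built up here: 5717_PLUS and
               * 57765_CLASS fold into 57765_PLUS, which feeds 5755_PLUS,
               * then 5750_PLUS, then 5705_PLUS, so later code can test the
               * broadest applicable class with a single flag.
               */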
14062         /* Intentionally exclude ASIC_REV_5906 */
14063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14064             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14065             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14066             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14067             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14069             tg3_flag(tp, 57765_PLUS))
14070                 tg3_flag_set(tp, 5755_PLUS);
14071
14072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14073             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14074             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14075             tg3_flag(tp, 5755_PLUS) ||
14076             tg3_flag(tp, 5780_CLASS))
14077                 tg3_flag_set(tp, 5750_PLUS);
14078
14079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14080             tg3_flag(tp, 5750_PLUS))
14081                 tg3_flag_set(tp, 5705_PLUS);
14082
14083         /* Determine TSO capabilities */
14084         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14085                 ; /* Do nothing. HW bug. */
14086         else if (tg3_flag(tp, 57765_PLUS))
14087                 tg3_flag_set(tp, HW_TSO_3);
14088         else if (tg3_flag(tp, 5755_PLUS) ||
14089                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14090                 tg3_flag_set(tp, HW_TSO_2);
14091         else if (tg3_flag(tp, 5750_PLUS)) {
14092                 tg3_flag_set(tp, HW_TSO_1);
14093                 tg3_flag_set(tp, TSO_BUG);
14094                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14095                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14096                         tg3_flag_clear(tp, TSO_BUG);
14097         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14098                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14099                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14100                 tg3_flag_set(tp, TSO_BUG);
14101                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14102                         tp->fw_needed = FIRMWARE_TG3TSO5;
14103                 else
14104                         tp->fw_needed = FIRMWARE_TG3TSO;
14105         }
14106
14107         /* Selectively allow TSO based on operating conditions */
14108         if (tg3_flag(tp, HW_TSO_1) ||
14109             tg3_flag(tp, HW_TSO_2) ||
14110             tg3_flag(tp, HW_TSO_3) ||
14111             tp->fw_needed) {
14112                 /* For firmware TSO, assume ASF is disabled.
14113                  * We'll disable TSO later if we discover ASF
14114                  * is enabled in tg3_get_eeprom_hw_cfg().
14115                  */
14116                 tg3_flag_set(tp, TSO_CAPABLE);
14117         } else {
14118                 tg3_flag_clear(tp, TSO_CAPABLE);
14119                 tg3_flag_clear(tp, TSO_BUG);
14120                 tp->fw_needed = NULL;
14121         }
14122
14123         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14124                 tp->fw_needed = FIRMWARE_TG3;
14125
14126         tp->irq_max = 1;
14127
14128         if (tg3_flag(tp, 5750_PLUS)) {
14129                 tg3_flag_set(tp, SUPPORT_MSI);
14130                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14131                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14132                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14133                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14134                      tp->pdev_peer == tp->pdev))
14135                         tg3_flag_clear(tp, SUPPORT_MSI);
14136
14137                 if (tg3_flag(tp, 5755_PLUS) ||
14138                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14139                         tg3_flag_set(tp, 1SHOT_MSI);
14140                 }
14141
14142                 if (tg3_flag(tp, 57765_PLUS)) {
14143                         tg3_flag_set(tp, SUPPORT_MSIX);
14144                         tp->irq_max = TG3_IRQ_MAX_VECS;
14145                         tg3_rss_init_dflt_indir_tbl(tp);
14146                 }
14147         }
14148
14149         if (tg3_flag(tp, 5755_PLUS))
14150                 tg3_flag_set(tp, SHORT_DMA_BUG);
14151
14152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14153                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14154         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14155                 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14156
14157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14158             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14159             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14160                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14161
14162         if (tg3_flag(tp, 57765_PLUS) &&
14163             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14164                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14165
14166         if (!tg3_flag(tp, 5705_PLUS) ||
14167             tg3_flag(tp, 5780_CLASS) ||
14168             tg3_flag(tp, USE_JUMBO_BDFLAG))
14169                 tg3_flag_set(tp, JUMBO_CAPABLE);
14170
14171         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14172                               &pci_state_reg);
14173
14174         if (pci_is_pcie(tp->pdev)) {
14175                 u16 lnkctl;
14176
14177                 tg3_flag_set(tp, PCI_EXPRESS);
14178
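                      /* 5719 A0 apparently mishandles PCIe maximum read
                       * request sizes above 2048 bytes, so clamp it here.
                       */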
14179                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14180                         int readrq = pcie_get_readrq(tp->pdev);
14181                         if (readrq > 2048)
14182                                 pcie_set_readrq(tp->pdev, 2048);
14183                 }
14184
14185                 pci_read_config_word(tp->pdev,
14186                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14187                                      &lnkctl);
14188                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14189                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14190                             ASIC_REV_5906) {
14191                                 tg3_flag_clear(tp, HW_TSO_2);
14192                                 tg3_flag_clear(tp, TSO_CAPABLE);
14193                         }
14194                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14195                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14196                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14197                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14198                                 tg3_flag_set(tp, CLKREQ_BUG);
14199                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14200                         tg3_flag_set(tp, L1PLLPD_EN);
14201                 }
14202         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14203                 /* BCM5785 devices are effectively PCIe devices, and should
14204                  * follow PCIe codepaths, but do not have a PCIe capabilities
14205                  * section.
14206                  */
14207                 tg3_flag_set(tp, PCI_EXPRESS);
14208         } else if (!tg3_flag(tp, 5705_PLUS) ||
14209                    tg3_flag(tp, 5780_CLASS)) {
14210                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14211                 if (!tp->pcix_cap) {
14212                         dev_err(&tp->pdev->dev,
14213                                 "Cannot find PCI-X capability, aborting\n");
14214                         return -EIO;
14215                 }
14216
14217                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14218                         tg3_flag_set(tp, PCIX_MODE);
14219         }
14220
14221         /* If we have an AMD 762 or VIA K8T800 chipset, write
14222          * reordering to the mailbox registers done by the host
14223          * controller can cause major troubles.  We read back from
14224          * every mailbox register write to force the writes to be
14225          * posted to the chip in order.
14226          */
14227         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14228             !tg3_flag(tp, PCI_EXPRESS))
14229                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14230
14231         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14232                              &tp->pci_cacheline_sz);
14233         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14234                              &tp->pci_lat_timer);
14235         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14236             tp->pci_lat_timer < 64) {
14237                 tp->pci_lat_timer = 64;
14238                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14239                                       tp->pci_lat_timer);
14240         }
14241
14242         /* Important! -- It is critical that the PCI-X hw workaround
14243          * situation is decided before the first MMIO register access.
14244          */
14245         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14246                 /* 5700 BX chips need to have their TX producer index
14247                  * mailboxes written twice to workaround a bug.
14248                  * mailboxes written twice to work around a bug.
14249                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14250
14251                 /* If we are in PCI-X mode, enable register write workaround.
14252                  *
14253                  * The workaround is to use indirect register accesses
14254                  * for all chip writes not to mailbox registers.
14255                  */
14256                 if (tg3_flag(tp, PCIX_MODE)) {
14257                         u32 pm_reg;
14258
14259                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14260
14261                         /* The chip can have its power management PCI config
14262                          * space registers clobbered due to this bug.
14263                          * So explicitly force the chip into D0 here.
14264                          */
14265                         pci_read_config_dword(tp->pdev,
14266                                               tp->pm_cap + PCI_PM_CTRL,
14267                                               &pm_reg);
14268                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14269                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14270                         pci_write_config_dword(tp->pdev,
14271                                                tp->pm_cap + PCI_PM_CTRL,
14272                                                pm_reg);
14273
14274                         /* Also, force SERR#/PERR# in PCI command. */
14275                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14276                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14277                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14278                 }
14279         }
14280
14281         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14282                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14283         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14284                 tg3_flag_set(tp, PCI_32BIT);
14285
14286         /* Chip-specific fixup from Broadcom driver */
14287         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14288             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14289                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14290                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14291         }
14292
14293         /* Default fast path register access methods */
14294         tp->read32 = tg3_read32;
14295         tp->write32 = tg3_write32;
14296         tp->read32_mbox = tg3_read32;
14297         tp->write32_mbox = tg3_write32;
14298         tp->write32_tx_mbox = tg3_write32;
14299         tp->write32_rx_mbox = tg3_write32;
14300
14301         /* Various workaround register access methods */
14302         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14303                 tp->write32 = tg3_write_indirect_reg32;
14304         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14305                  (tg3_flag(tp, PCI_EXPRESS) &&
14306                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14307                 /*
14308                  * Back to back register writes can cause problems on these
14309                  * chips, the workaround is to read back all reg writes
14310                  * except those to mailbox regs.
14311                  *
14312                  * See tg3_write_indirect_reg32().
14313                  */
14314                 tp->write32 = tg3_write_flush_reg32;
14315         }
14316
14317         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14318                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14319                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14320                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14321         }
14322
14323         if (tg3_flag(tp, ICH_WORKAROUND)) {
14324                 tp->read32 = tg3_read_indirect_reg32;
14325                 tp->write32 = tg3_write_indirect_reg32;
14326                 tp->read32_mbox = tg3_read_indirect_mbox;
14327                 tp->write32_mbox = tg3_write_indirect_mbox;
14328                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14329                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14330
14331                 iounmap(tp->regs);
14332                 tp->regs = NULL;
14333
14334                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14335                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14336                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14337         }
14338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14339                 tp->read32_mbox = tg3_read32_mbox_5906;
14340                 tp->write32_mbox = tg3_write32_mbox_5906;
14341                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14342                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14343         }
14344
14345         if (tp->write32 == tg3_write_indirect_reg32 ||
14346             (tg3_flag(tp, PCIX_MODE) &&
14347              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14348               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14349                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14350
14351         /* The memory arbiter has to be enabled in order for SRAM accesses
14352          * to succeed.  Normally on powerup the tg3 chip firmware will make
14353          * sure it is enabled, but other entities such as system netboot
14354          * code might disable it.
14355          */
14356         val = tr32(MEMARB_MODE);
14357         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14358
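              /* Determine which PCI function this port is.  devfn is not
               * reliable on every chip: some encode the function number in
               * PCI-X status, others in the CPMU status word in SRAM.
               */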
14359         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14361             tg3_flag(tp, 5780_CLASS)) {
14362                 if (tg3_flag(tp, PCIX_MODE)) {
14363                         pci_read_config_dword(tp->pdev,
14364                                               tp->pcix_cap + PCI_X_STATUS,
14365                                               &val);
14366                         tp->pci_fn = val & 0x7;
14367                 }
14368         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14369                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14370                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14371                     NIC_SRAM_CPMUSTAT_SIG) {
14372                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14373                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14374                 }
14375         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14376                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14377                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14378                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14379                     NIC_SRAM_CPMUSTAT_SIG) {
14380                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14381                                      TG3_CPMU_STATUS_FSHFT_5719;
14382                 }
14383         }
14384
14385         /* Get eeprom hw config before calling tg3_set_power_state().
14386          * In particular, the TG3_FLAG_IS_NIC flag must be
14387          * determined before calling tg3_set_power_state() so that
14388          * we know whether or not to switch out of Vaux power.
14389          * When the flag is set, it means that GPIO1 is used for eeprom
14390          * write protect and also implies that it is a LOM where GPIOs
14391          * are not used to switch power.
14392          */
14393         tg3_get_eeprom_hw_cfg(tp);
14394
14395         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14396                 tg3_flag_clear(tp, TSO_CAPABLE);
14397                 tg3_flag_clear(tp, TSO_BUG);
14398                 tp->fw_needed = NULL;
14399         }
14400
14401         if (tg3_flag(tp, ENABLE_APE)) {
14402                 /* Allow reads and writes to the
14403                  * APE register and memory space.
14404                  */
14405                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14406                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14407                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14408                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14409                                        pci_state_reg);
14410
14411                 tg3_ape_lock_init(tp);
14412         }
14413
14414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14415             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14416             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14417             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14418             tg3_flag(tp, 57765_PLUS))
14419                 tg3_flag_set(tp, CPMU_PRESENT);
14420
14421         /* Set up tp->grc_local_ctrl before calling
14422          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14423          * will bring 5700's external PHY out of reset.
14424          * It is also used as eeprom write protect on LOMs.
14425          */
14426         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14427         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14428             tg3_flag(tp, EEPROM_WRITE_PROT))
14429                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14430                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14431         /* Unused GPIO3 must be driven as output on 5752 because there
14432          * are no pull-up resistors on unused GPIO pins.
14433          */
14434         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14435                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14436
14437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14438             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14439             tg3_flag(tp, 57765_CLASS))
14440                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14441
14442         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14443             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14444                 /* Turn off the debug UART. */
14445                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14446                 if (tg3_flag(tp, IS_NIC))
14447                         /* Keep VMain power. */
14448                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14449                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14450         }
14451
14452         /* Switch out of Vaux if it is a NIC */
14453         tg3_pwrsrc_switch_to_vmain(tp);
14454
14455         /* Derive initial jumbo mode from MTU assigned in
14456          * ether_setup() via the alloc_etherdev() call
14457          */
14458         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14459                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14460
14461         /* Determine WakeOnLan speed to use. */
14462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14463             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14464             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14465             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14466                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14467         } else {
14468                 tg3_flag_set(tp, WOL_SPEED_100MB);
14469         }
14470
14471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14472                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14473
14474         /* A few boards don't want Ethernet@WireSpeed phy feature */
14475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14476             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14477              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14478              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14479             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14480             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14481                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14482
14483         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14484             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14485                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14486         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14487                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14488
14489         if (tg3_flag(tp, 5705_PLUS) &&
14490             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14491             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14492             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14493             !tg3_flag(tp, 57765_PLUS)) {
14494                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14495                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14496                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14497                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14498                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14499                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14500                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14501                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14502                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14503                 } else
14504                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14505         }
14506
14507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14508             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14509                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14510                 if (tp->phy_otp == 0)
14511                         tp->phy_otp = TG3_OTP_DEFAULT;
14512         }
14513
14514         if (tg3_flag(tp, CPMU_PRESENT))
14515                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14516         else
14517                 tp->mi_mode = MAC_MI_MODE_BASE;
14518
14519         tp->coalesce_mode = 0;
14520         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14521             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14522                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14523
14524         /* Set these bits to enable statistics workaround. */
14525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14526             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14527             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14528                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14529                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14530         }
14531
14532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14533             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14534                 tg3_flag_set(tp, USE_PHYLIB);
14535
14536         err = tg3_mdio_init(tp);
14537         if (err)
14538                 return err;
14539
14540         /* Initialize data/descriptor byte/word swapping. */
14541         val = tr32(GRC_MODE);
14542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14543                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14544                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14545                         GRC_MODE_B2HRX_ENABLE |
14546                         GRC_MODE_HTX2B_ENABLE |
14547                         GRC_MODE_HOST_STACKUP);
14548         else
14549                 val &= GRC_MODE_HOST_STACKUP;
14550
14551         tw32(GRC_MODE, val | tp->grc_mode);
14552
14553         tg3_switch_clocks(tp);
14554
14555         /* Clear this out for sanity. */
14556         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14557
14558         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14559                               &pci_state_reg);
14560         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14561             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14562                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14563
14564                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14565                     chiprevid == CHIPREV_ID_5701_B0 ||
14566                     chiprevid == CHIPREV_ID_5701_B2 ||
14567                     chiprevid == CHIPREV_ID_5701_B5) {
14568                         void __iomem *sram_base;
14569
14570                         /* Write some dummy words into the SRAM status block
14571                          * area, see if it reads back correctly.  If the return
14572                          * area and see if they read back correctly.  If the
14573                          * readback is bad, force-enable the PCI-X workaround.
14574                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14575
14576                         writel(0x00000000, sram_base);
14577                         writel(0x00000000, sram_base + 4);
14578                         writel(0xffffffff, sram_base + 4);
14579                         if (readl(sram_base) != 0x00000000)
14580                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14581                 }
14582         }
14583
14584         udelay(50);
14585         tg3_nvram_init(tp);
14586
14587         grc_misc_cfg = tr32(GRC_MISC_CFG);
14588         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14589
14590         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14591             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14592              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14593                 tg3_flag_set(tp, IS_5788);
14594
14595         if (!tg3_flag(tp, IS_5788) &&
14596             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14597                 tg3_flag_set(tp, TAGGED_STATUS);
14598         if (tg3_flag(tp, TAGGED_STATUS)) {
14599                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14600                                       HOSTCC_MODE_CLRTICK_TXBD);
14601
14602                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14603                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14604                                        tp->misc_host_ctrl);
14605         }
14606
14607         /* Preserve the APE MAC_MODE bits */
14608         if (tg3_flag(tp, ENABLE_APE))
14609                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14610         else
14611                 tp->mac_mode = 0;
14612
14613         /* These devices are limited to 10/100 speeds only. */
14614         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14615              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14616             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14617              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14618              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14619               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14620               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14621             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14622              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14623               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14624               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14625             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14626             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14627             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14628             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14629                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14630
14631         err = tg3_phy_probe(tp);
14632         if (err) {
14633                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14634                 /* ... but do not return immediately ... */
14635                 tg3_mdio_fini(tp);
14636         }
14637
14638         tg3_read_vpd(tp);
14639         tg3_read_fw_ver(tp);
14640
14641         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14642                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14643         } else {
14644                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14645                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14646                 else
14647                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14648         }
14649
14650         /* 5700 {AX,BX} chips have a broken status block link
14651          * change bit implementation, so we must use the
14652          * status register in those cases.
14653          */
14654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14655                 tg3_flag_set(tp, USE_LINKCHG_REG);
14656         else
14657                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14658
14659         /* The led_ctrl is set during tg3_phy_probe; here we might
14660          * have to force the link status polling mechanism based
14661          * upon subsystem IDs.
14662          */
14663         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14665             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14666                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14667                 tg3_flag_set(tp, USE_LINKCHG_REG);
14668         }
14669
14670         /* For all SERDES we poll the MAC status register. */
14671         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14672                 tg3_flag_set(tp, POLL_SERDES);
14673         else
14674                 tg3_flag_clear(tp, POLL_SERDES);
14675
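              /* The 5701 in PCI-X mode evidently cannot DMA into an offset
               * rx buffer, so drop the NET_IP_ALIGN pad there and, on
               * architectures where unaligned loads are expensive, copy
               * every received packet.
               */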
14676         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14677         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14679             tg3_flag(tp, PCIX_MODE)) {
14680                 tp->rx_offset = NET_SKB_PAD;
14681 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14682                 tp->rx_copy_thresh = ~(u16)0;
14683 #endif
14684         }
14685
14686         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14687         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14688         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14689
14690         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14691
14692         /* Increment the rx prod index on the rx std ring by at most
14693          * 8 for these chips to workaround hw errata.
14694          * 8 for these chips to work around hw errata.
14695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14696             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14698                 tp->rx_std_max_post = 8;
14699
14700         if (tg3_flag(tp, ASPM_WORKAROUND))
14701                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14702                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14703
14704         return err;
14705 }
14706
14707 #ifdef CONFIG_SPARC
14708 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14709 {
14710         struct net_device *dev = tp->dev;
14711         struct pci_dev *pdev = tp->pdev;
14712         struct device_node *dp = pci_device_to_OF_node(pdev);
14713         const unsigned char *addr;
14714         int len;
14715
14716         addr = of_get_property(dp, "local-mac-address", &len);
14717         if (addr && len == 6) {
14718                 memcpy(dev->dev_addr, addr, 6);
14719                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14720                 return 0;
14721         }
14722         return -ENODEV;
14723 }
14724
14725 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14726 {
14727         struct net_device *dev = tp->dev;
14728
14729         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14730         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14731         return 0;
14732 }
14733 #endif
14734
14735 static int __devinit tg3_get_device_address(struct tg3 *tp)
14736 {
14737         struct net_device *dev = tp->dev;
14738         u32 hi, lo, mac_offset;
14739         int addr_ok = 0;
14740
14741 #ifdef CONFIG_SPARC
14742         if (!tg3_get_macaddr_sparc(tp))
14743                 return 0;
14744 #endif
14745
14746         mac_offset = 0x7c;
14747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14748             tg3_flag(tp, 5780_CLASS)) {
14749                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14750                         mac_offset = 0xcc;
14751                 if (tg3_nvram_lock(tp))
14752                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14753                 else
14754                         tg3_nvram_unlock(tp);
14755         } else if (tg3_flag(tp, 5717_PLUS)) {
14756                 if (tp->pci_fn & 1)
14757                         mac_offset = 0xcc;
14758                 if (tp->pci_fn > 1)
14759                         mac_offset += 0x18c;
14760         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14761                 mac_offset = 0x10;
14762
14763         /* First try to get it from MAC address mailbox. */
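              /* Bootcode stamps 0x484b (ASCII "HK") into the upper half of
               * the mailbox as a validity signature.
               */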
14764         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14765         if ((hi >> 16) == 0x484b) {
14766                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14767                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14768
14769                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14770                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14771                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14772                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14773                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14774
14775                 /* Some old bootcode may report a 0 MAC address in SRAM */
14776                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14777         }
14778         if (!addr_ok) {
14779                 /* Next, try NVRAM. */
14780                 if (!tg3_flag(tp, NO_NVRAM) &&
14781                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14782                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14783                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14784                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14785                 }
14786                 /* Finally just fetch it out of the MAC control regs. */
14787                 else {
14788                         hi = tr32(MAC_ADDR_0_HIGH);
14789                         lo = tr32(MAC_ADDR_0_LOW);
14790
14791                         dev->dev_addr[5] = lo & 0xff;
14792                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14793                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14794                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14795                         dev->dev_addr[1] = hi & 0xff;
14796                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14797                 }
14798         }
14799
14800         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14801 #ifdef CONFIG_SPARC
14802                 if (!tg3_get_default_macaddr_sparc(tp))
14803                         return 0;
14804 #endif
14805                 return -EINVAL;
14806         }
14807         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14808         return 0;
14809 }
14810
14811 #define BOUNDARY_SINGLE_CACHELINE       1
14812 #define BOUNDARY_MULTI_CACHELINE        2
14813
14814 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14815 {
14816         int cacheline_size;
14817         u8 byte;
14818         int goal;
14819
14820         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
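              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
               * zero means it was never programmed, so fall back to 1024
               * bytes.
               */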
14821         if (byte == 0)
14822                 cacheline_size = 1024;
14823         else
14824                 cacheline_size = (int) byte * 4;
14825
14826         /* On 5703 and later chips, the boundary bits have no
14827          * effect.
14828          */
14829         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14830             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14831             !tg3_flag(tp, PCI_EXPRESS))
14832                 goto out;
14833
14834 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14835         goal = BOUNDARY_MULTI_CACHELINE;
14836 #else
14837 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14838         goal = BOUNDARY_SINGLE_CACHELINE;
14839 #else
14840         goal = 0;
14841 #endif
14842 #endif
14843
14844         if (tg3_flag(tp, 57765_PLUS)) {
14845                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14846                 goto out;
14847         }
14848
14849         if (!goal)
14850                 goto out;
14851
14852         /* PCI controllers on most RISC systems tend to disconnect
14853          * when a device tries to burst across a cache-line boundary.
14854          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14855          *
14856          * Unfortunately, for PCI-E there are only limited
14857          * write-side controls for this, and thus for reads
14858          * we will still get the disconnects.  We'll also waste
14859          * these PCI cycles for both read and write for chips
14860          * other than 5700 and 5701 which do not implement the
14861          * boundary bits.
14862          */
14863         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14864                 switch (cacheline_size) {
14865                 case 16:
14866                 case 32:
14867                 case 64:
14868                 case 128:
14869                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14870                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14871                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14872                         } else {
14873                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14874                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14875                         }
14876                         break;
14877
14878                 case 256:
14879                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14880                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14881                         break;
14882
14883                 default:
14884                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14885                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14886                         break;
14887                 }
14888         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14889                 switch (cacheline_size) {
14890                 case 16:
14891                 case 32:
14892                 case 64:
14893                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14894                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14895                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14896                                 break;
14897                         }
14898                         /* fallthrough */
14899                 case 128:
14900                 default:
14901                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14902                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14903                         break;
14904                 }
14905         } else {
14906                 switch (cacheline_size) {
14907                 case 16:
14908                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14909                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14910                                         DMA_RWCTRL_WRITE_BNDRY_16);
14911                                 break;
14912                         }
14913                         /* fallthrough */
14914                 case 32:
14915                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14916                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14917                                         DMA_RWCTRL_WRITE_BNDRY_32);
14918                                 break;
14919                         }
14920                         /* fallthrough */
14921                 case 64:
14922                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14923                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14924                                         DMA_RWCTRL_WRITE_BNDRY_64);
14925                                 break;
14926                         }
14927                         /* fallthrough */
14928                 case 128:
14929                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14930                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14931                                         DMA_RWCTRL_WRITE_BNDRY_128);
14932                                 break;
14933                         }
14934                         /* fallthrough */
14935                 case 256:
14936                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14937                                 DMA_RWCTRL_WRITE_BNDRY_256);
14938                         break;
14939                 case 512:
14940                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14941                                 DMA_RWCTRL_WRITE_BNDRY_512);
14942                         break;
14943                 case 1024:
14944                 default:
14945                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14946                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14947                         break;
14948                 }
14949         }
14950
14951 out:
14952         return val;
14953 }
14954
14955 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14956 {
14957         struct tg3_internal_buffer_desc test_desc;
14958         u32 sram_dma_descs;
14959         int i, ret;
14960
14961         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14962
14963         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14964         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14965         tw32(RDMAC_STATUS, 0);
14966         tw32(WDMAC_STATUS, 0);
14967
14968         tw32(BUFMGR_MODE, 0);
14969         tw32(FTQ_RESET, 0);
14970
14971         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14972         test_desc.addr_lo = buf_dma & 0xffffffff;
14973         test_desc.nic_mbuf = 0x00002100;
14974         test_desc.len = size;
14975
14976         /*
14977          * The HP ZX1 was seeing test failures for 5701 cards running at
14978          * 33MHz the *second* time the tg3 driver was loaded after an
14979          * initial scan.
14980          *
14981          * Broadcom tells me:
14982          *   ...the DMA engine is connected to the GRC block and a DMA
14983          *   reset may affect the GRC block in some unpredictable way...
14984          *   The behavior of resets to individual blocks has not been tested.
14985          *
14986          * Broadcom noted the GRC reset will also reset all sub-components.
14987          */
14988         if (to_device) {
14989                 test_desc.cqid_sqid = (13 << 8) | 2;
14990
14991                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14992                 udelay(40);
14993         } else {
14994                 test_desc.cqid_sqid = (16 << 8) | 7;
14995
14996                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14997                 udelay(40);
14998         }
14999         test_desc.flags = 0x00000005;
15000
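        /* Write the test descriptor into NIC SRAM one 32-bit word at a
         * time through the PCI memory window registers.
         */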
15001         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15002                 u32 val;
15003
15004                 val = *(((u32 *)&test_desc) + i);
15005                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15006                                        sram_dma_descs + (i * sizeof(u32)));
15007                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15008         }
15009         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15010
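        /* Kick off the transfer by enqueueing the descriptor's SRAM address
         * on the DMA high read (to device) or write (from device) FTQ.
         */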
15011         if (to_device)
15012                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15013         else
15014                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15015
15016         ret = -ENODEV;
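        /* Poll the completion FIFO for our descriptor; give up after 40
         * iterations (roughly 4 ms with the udelay(100) below).
         */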
15017         for (i = 0; i < 40; i++) {
15018                 u32 val;
15019
15020                 if (to_device)
15021                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15022                 else
15023                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15024                 if ((val & 0xffff) == sram_dma_descs) {
15025                         ret = 0;
15026                         break;
15027                 }
15028
15029                 udelay(100);
15030         }
15031
15032         return ret;
15033 }
15034
15035 #define TEST_BUFFER_SIZE        0x2000
15036
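/* Chipsets known to expose the 5700/5701 write DMA bug without failing
 * the DMA test; the 16-byte write boundary is forced when one is present.
 */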
15037 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15038         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15039         { },
15040 };
15041
15042 static int __devinit tg3_test_dma(struct tg3 *tp)
15043 {
15044         dma_addr_t buf_dma;
15045         u32 *buf, saved_dma_rwctrl;
15046         int ret = 0;
15047
15048         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15049                                  &buf_dma, GFP_KERNEL);
15050         if (!buf) {
15051                 ret = -ENOMEM;
15052                 goto out_nofree;
15053         }
15054
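        /* Seed the DMA control word with the PCI read/write command codes;
         * tg3_calc_dma_bndry() below merges in the boundary bits.
         */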
15055         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15056                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15057
15058         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15059
15060         if (tg3_flag(tp, 57765_PLUS))
15061                 goto out;
15062
15063         if (tg3_flag(tp, PCI_EXPRESS)) {
15064                 /* DMA read watermark not used on PCIE */
15065                 tp->dma_rwctrl |= 0x00180000;
15066         } else if (!tg3_flag(tp, PCIX_MODE)) {
15067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15068                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15069                         tp->dma_rwctrl |= 0x003f0000;
15070                 else
15071                         tp->dma_rwctrl |= 0x003f000f;
15072         } else {
15073                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15074                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15075                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15076                         u32 read_water = 0x7;
15077
15078                         /* If the 5704 is behind the EPB bridge, we can
15079                          * do the less restrictive ONE_DMA workaround for
15080                          * better performance.
15081                          */
15082                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15083                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15084                                 tp->dma_rwctrl |= 0x8000;
15085                         else if (ccval == 0x6 || ccval == 0x7)
15086                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15087
15088                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15089                                 read_water = 4;
15090                         /* Set bit 23 to enable PCIX hw bug fix */
15091                         tp->dma_rwctrl |=
15092                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15093                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15094                                 (1 << 23);
15095                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15096                         /* 5780 always in PCIX mode */
15097                         tp->dma_rwctrl |= 0x00144000;
15098                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15099                         /* 5714 always in PCIX mode */
15100                         tp->dma_rwctrl |= 0x00148000;
15101                 } else {
15102                         tp->dma_rwctrl |= 0x001b000f;
15103                 }
15104         }
15105
15106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15107             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15108                 tp->dma_rwctrl &= 0xfffffff0;
15109
15110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15112                 /* Remove this if it causes problems for some boards. */
15113                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15114
15115                 /* On 5700/5701 chips, we need to set this bit.
15116                  * Otherwise the chip will issue cacheline transactions
15117                  * to streamable DMA memory without all the byte
15118                  * enables turned on.  This is an error on several
15119                  * RISC PCI controllers, in particular sparc64.
15120                  *
15121                  * On 5703/5704 chips, this bit has been reassigned
15122                  * a different meaning.  In particular, it is used
15123                  * on those chips to enable a PCI-X workaround.
15124                  */
15125                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15126         }
15127
15128         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15129
15130 #if 0
15131         /* Unneeded, already done by tg3_get_invariants.  */
15132         tg3_switch_clocks(tp);
15133 #endif
15134
15135         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15137                 goto out;
15138
15139         /* It is best to perform DMA test with maximum write burst size
15140          * to expose the 5700/5701 write DMA bug.
15141          */
15142         saved_dma_rwctrl = tp->dma_rwctrl;
15143         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15144         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15145
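        /* Exercise the DMA engine: fill the buffer with a known pattern,
         * DMA it to the chip, DMA it back, and verify the contents.  On
         * corruption, retry once with the 16-byte write boundary.
         */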
15146         while (1) {
15147                 u32 *p = buf, i;
15148
15149                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15150                         p[i] = i;
15151
15152                 /* Send the buffer to the chip. */
15153                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15154                 if (ret) {
15155                         dev_err(&tp->pdev->dev,
15156                                 "%s: Buffer write failed. err = %d\n",
15157                                 __func__, ret);
15158                         break;
15159                 }
15160
15161 #if 0
15162                 /* Validate that the data reached card RAM correctly. */
15163                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15164                         u32 val;
15165                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15166                         if (le32_to_cpu(val) != p[i]) {
15167                                 dev_err(&tp->pdev->dev,
15168                                         "%s: Buffer corrupted on device! "
15169                                         "(%d != %d)\n", __func__, val, i);
15170                                 /* ret = -ENODEV here? */
15171                         }
15172                         p[i] = 0;
15173                 }
15174 #endif
15175                 /* Now read it back. */
15176                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15177                 if (ret) {
15178                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15179                                 "err = %d\n", __func__, ret);
15180                         break;
15181                 }
15182
15183                 /* Verify it. */
15184                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15185                         if (p[i] == i)
15186                                 continue;
15187
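                        /* Mismatch: fall back to the conservative 16-byte
                         * write boundary before declaring the device bad.
                         */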
15188                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15189                             DMA_RWCTRL_WRITE_BNDRY_16) {
15190                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15191                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15192                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15193                                 break;
15194                         } else {
15195                                 dev_err(&tp->pdev->dev,
15196                                         "%s: Buffer corrupted on read back! "
15197                                         "(%d != %d)\n", __func__, p[i], i);
15198                                 ret = -ENODEV;
15199                                 goto out;
15200                         }
15201                 }
15202
15203                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15204                         /* Success. */
15205                         ret = 0;
15206                         break;
15207                 }
15208         }
15209         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15210             DMA_RWCTRL_WRITE_BNDRY_16) {
15211                 /* DMA test passed without adjusting DMA boundary,
15212                  * now look for chipsets that are known to expose the
15213                  * DMA bug without failing the test.
15214                  */
15215                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15216                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15217                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15218                 } else {
15219                         /* Safe to use the calculated DMA boundary. */
15220                         tp->dma_rwctrl = saved_dma_rwctrl;
15221                 }
15222
15223                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15224         }
15225
15226 out:
15227         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15228 out_nofree:
15229         return ret;
15230 }
15231
15232 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15233 {
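        /* Choose buffer manager watermarks by chip generation; standard
         * and jumbo frame settings are tracked separately.
         */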
15234         if (tg3_flag(tp, 57765_PLUS)) {
15235                 tp->bufmgr_config.mbuf_read_dma_low_water =
15236                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15237                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15238                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15239                 tp->bufmgr_config.mbuf_high_water =
15240                         DEFAULT_MB_HIGH_WATER_57765;
15241
15242                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15243                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15244                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15245                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15246                 tp->bufmgr_config.mbuf_high_water_jumbo =
15247                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15248         } else if (tg3_flag(tp, 5705_PLUS)) {
15249                 tp->bufmgr_config.mbuf_read_dma_low_water =
15250                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15251                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15252                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15253                 tp->bufmgr_config.mbuf_high_water =
15254                         DEFAULT_MB_HIGH_WATER_5705;
15255                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15256                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15257                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15258                         tp->bufmgr_config.mbuf_high_water =
15259                                 DEFAULT_MB_HIGH_WATER_5906;
15260                 }
15261
15262                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15263                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15264                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15265                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15266                 tp->bufmgr_config.mbuf_high_water_jumbo =
15267                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15268         } else {
15269                 tp->bufmgr_config.mbuf_read_dma_low_water =
15270                         DEFAULT_MB_RDMA_LOW_WATER;
15271                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15272                         DEFAULT_MB_MACRX_LOW_WATER;
15273                 tp->bufmgr_config.mbuf_high_water =
15274                         DEFAULT_MB_HIGH_WATER;
15275
15276                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15277                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15278                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15279                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15280                 tp->bufmgr_config.mbuf_high_water_jumbo =
15281                         DEFAULT_MB_HIGH_WATER_JUMBO;
15282         }
15283
15284         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15285         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15286 }
15287
15288 static char * __devinit tg3_phy_string(struct tg3 *tp)
15289 {
15290         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15291         case TG3_PHY_ID_BCM5400:        return "5400";
15292         case TG3_PHY_ID_BCM5401:        return "5401";
15293         case TG3_PHY_ID_BCM5411:        return "5411";
15294         case TG3_PHY_ID_BCM5701:        return "5701";
15295         case TG3_PHY_ID_BCM5703:        return "5703";
15296         case TG3_PHY_ID_BCM5704:        return "5704";
15297         case TG3_PHY_ID_BCM5705:        return "5705";
15298         case TG3_PHY_ID_BCM5750:        return "5750";
15299         case TG3_PHY_ID_BCM5752:        return "5752";
15300         case TG3_PHY_ID_BCM5714:        return "5714";
15301         case TG3_PHY_ID_BCM5780:        return "5780";
15302         case TG3_PHY_ID_BCM5755:        return "5755";
15303         case TG3_PHY_ID_BCM5787:        return "5787";
15304         case TG3_PHY_ID_BCM5784:        return "5784";
15305         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15306         case TG3_PHY_ID_BCM5906:        return "5906";
15307         case TG3_PHY_ID_BCM5761:        return "5761";
15308         case TG3_PHY_ID_BCM5718C:       return "5718C";
15309         case TG3_PHY_ID_BCM5718S:       return "5718S";
15310         case TG3_PHY_ID_BCM57765:       return "57765";
15311         case TG3_PHY_ID_BCM5719C:       return "5719C";
15312         case TG3_PHY_ID_BCM5720C:       return "5720C";
15313         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15314         case 0:                 return "serdes";
15315         default:                return "unknown";
15316         }
15317 }
15318
15319 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15320 {
15321         if (tg3_flag(tp, PCI_EXPRESS)) {
15322                 strcpy(str, "PCI Express");
15323                 return str;
15324         } else if (tg3_flag(tp, PCIX_MODE)) {
15325                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15326
15327                 strcpy(str, "PCIX:");
15328
15329                 if ((clock_ctrl == 7) ||
15330                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15331                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15332                         strcat(str, "133MHz");
15333                 else if (clock_ctrl == 0)
15334                         strcat(str, "33MHz");
15335                 else if (clock_ctrl == 2)
15336                         strcat(str, "50MHz");
15337                 else if (clock_ctrl == 4)
15338                         strcat(str, "66MHz");
15339                 else if (clock_ctrl == 6)
15340                         strcat(str, "100MHz");
15341         } else {
15342                 strcpy(str, "PCI:");
15343                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15344                         strcat(str, "66MHz");
15345                 else
15346                         strcat(str, "33MHz");
15347         }
15348         if (tg3_flag(tp, PCI_32BIT))
15349                 strcat(str, ":32-bit");
15350         else
15351                 strcat(str, ":64-bit");
15352         return str;
15353 }
15354
15355 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15356 {
15357         struct pci_dev *peer;
15358         unsigned int func, devnr = tp->pdev->devfn & ~7;
15359
15360         for (func = 0; func < 8; func++) {
15361                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15362                 if (peer && peer != tp->pdev)
15363                         break;
15364                 pci_dev_put(peer);
15365         }
15366         /* 5704 can be configured in single-port mode, set peer to
15367          * tp->pdev in that case.
15368          */
15369         if (!peer) {
15370                 peer = tp->pdev;
15371                 return peer;
15372         }
15373
15374         /*
15375          * We don't need to keep the refcount elevated; there's no way
15376          * to remove one half of this device without removing the other.
15377          */
15378         pci_dev_put(peer);
15379
15380         return peer;
15381 }
15382
15383 static void __devinit tg3_init_coal(struct tg3 *tp)
15384 {
15385         struct ethtool_coalesce *ec = &tp->coal;
15386
15387         memset(ec, 0, sizeof(*ec));
15388         ec->cmd = ETHTOOL_GCOALESCE;
15389         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15390         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15391         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15392         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15393         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15394         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15395         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15396         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15397         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15398
15399         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15400                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15401                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15402                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15403                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15404                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15405         }
15406
15407         if (tg3_flag(tp, 5705_PLUS)) {
15408                 ec->rx_coalesce_usecs_irq = 0;
15409                 ec->tx_coalesce_usecs_irq = 0;
15410                 ec->stats_block_coalesce_usecs = 0;
15411         }
15412 }
15413
15414 static const struct net_device_ops tg3_netdev_ops = {
15415         .ndo_open               = tg3_open,
15416         .ndo_stop               = tg3_close,
15417         .ndo_start_xmit         = tg3_start_xmit,
15418         .ndo_get_stats64        = tg3_get_stats64,
15419         .ndo_validate_addr      = eth_validate_addr,
15420         .ndo_set_rx_mode        = tg3_set_rx_mode,
15421         .ndo_set_mac_address    = tg3_set_mac_addr,
15422         .ndo_do_ioctl           = tg3_ioctl,
15423         .ndo_tx_timeout         = tg3_tx_timeout,
15424         .ndo_change_mtu         = tg3_change_mtu,
15425         .ndo_fix_features       = tg3_fix_features,
15426         .ndo_set_features       = tg3_set_features,
15427 #ifdef CONFIG_NET_POLL_CONTROLLER
15428         .ndo_poll_controller    = tg3_poll_controller,
15429 #endif
15430 };
15431
15432 static int __devinit tg3_init_one(struct pci_dev *pdev,
15433                                   const struct pci_device_id *ent)
15434 {
15435         struct net_device *dev;
15436         struct tg3 *tp;
15437         int i, err, pm_cap;
15438         u32 sndmbx, rcvmbx, intmbx;
15439         char str[40];
15440         u64 dma_mask, persist_dma_mask;
15441         netdev_features_t features = 0;
15442
15443         printk_once(KERN_INFO "%s\n", version);
15444
15445         err = pci_enable_device(pdev);
15446         if (err) {
15447                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15448                 return err;
15449         }
15450
15451         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15452         if (err) {
15453                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15454                 goto err_out_disable_pdev;
15455         }
15456
15457         pci_set_master(pdev);
15458
15459         /* Find power-management capability. */
15460         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15461         if (pm_cap == 0) {
15462                 dev_err(&pdev->dev,
15463                         "Cannot find Power Management capability, aborting\n");
15464                 err = -EIO;
15465                 goto err_out_free_res;
15466         }
15467
15468         err = pci_set_power_state(pdev, PCI_D0);
15469         if (err) {
15470                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15471                 goto err_out_free_res;
15472         }
15473
15474         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15475         if (!dev) {
15476                 err = -ENOMEM;
15477                 goto err_out_power_down;
15478         }
15479
15480         SET_NETDEV_DEV(dev, &pdev->dev);
15481
15482         tp = netdev_priv(dev);
15483         tp->pdev = pdev;
15484         tp->dev = dev;
15485         tp->pm_cap = pm_cap;
15486         tp->rx_mode = TG3_DEF_RX_MODE;
15487         tp->tx_mode = TG3_DEF_TX_MODE;
15488
15489         if (tg3_debug > 0)
15490                 tp->msg_enable = tg3_debug;
15491         else
15492                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15493
15494         /* The word/byte swap controls here govern register access byte
15495          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15496          * setting below.
15497          */
15498         tp->misc_host_ctrl =
15499                 MISC_HOST_CTRL_MASK_PCI_INT |
15500                 MISC_HOST_CTRL_WORD_SWAP |
15501                 MISC_HOST_CTRL_INDIR_ACCESS |
15502                 MISC_HOST_CTRL_PCISTATE_RW;
15503
15504         /* The NONFRM (non-frame) byte/word swap controls take effect
15505          * on descriptor entries, anything which isn't packet data.
15506          *
15507          * The StrongARM chips on the board (one for tx, one for rx)
15508          * are running in big-endian mode.
15509          */
15510         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15511                         GRC_MODE_WSWAP_NONFRM_DATA);
15512 #ifdef __BIG_ENDIAN
15513         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15514 #endif
15515         spin_lock_init(&tp->lock);
15516         spin_lock_init(&tp->indirect_lock);
15517         INIT_WORK(&tp->reset_task, tg3_reset_task);
15518
15519         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15520         if (!tp->regs) {
15521                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15522                 err = -ENOMEM;
15523                 goto err_out_free_dev;
15524         }
15525
15526         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15527             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15528             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15529             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15530             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15531             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15532             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15533             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15534                 tg3_flag_set(tp, ENABLE_APE);
15535                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15536                 if (!tp->aperegs) {
15537                         dev_err(&pdev->dev,
15538                                 "Cannot map APE registers, aborting\n");
15539                         err = -ENOMEM;
15540                         goto err_out_iounmap;
15541                 }
15542         }
15543
15544         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15545         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15546
15547         dev->ethtool_ops = &tg3_ethtool_ops;
15548         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15549         dev->netdev_ops = &tg3_netdev_ops;
15550         dev->irq = pdev->irq;
15551
15552         err = tg3_get_invariants(tp);
15553         if (err) {
15554                 dev_err(&pdev->dev,
15555                         "Problem fetching invariants of chip, aborting\n");
15556                 goto err_out_apeunmap;
15557         }
15558
15559         /* The EPB bridge inside 5714, 5715, and 5780 and any
15560          * device behind the EPB cannot support DMA addresses > 40-bit.
15561          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15562          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15563          * do DMA address check in tg3_start_xmit().
15564          */
15565         if (tg3_flag(tp, IS_5788))
15566                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15567         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15568                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15569 #ifdef CONFIG_HIGHMEM
15570                 dma_mask = DMA_BIT_MASK(64);
15571 #endif
15572         } else
15573                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15574
15575         /* Configure DMA attributes. */
15576         if (dma_mask > DMA_BIT_MASK(32)) {
15577                 err = pci_set_dma_mask(pdev, dma_mask);
15578                 if (!err) {
15579                         features |= NETIF_F_HIGHDMA;
15580                         err = pci_set_consistent_dma_mask(pdev,
15581                                                           persist_dma_mask);
15582                         if (err < 0) {
15583                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15584                                         "DMA for consistent allocations\n");
15585                                 goto err_out_apeunmap;
15586                         }
15587                 }
15588         }
15589         if (err || dma_mask == DMA_BIT_MASK(32)) {
15590                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15591                 if (err) {
15592                         dev_err(&pdev->dev,
15593                                 "No usable DMA configuration, aborting\n");
15594                         goto err_out_apeunmap;
15595                 }
15596         }
15597
15598         tg3_init_bufmgr_config(tp);
15599
15600         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15601
15602         /* 5700 B0 chips do not support checksumming correctly due
15603          * to hardware bugs.
15604          */
15605         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15606                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15607
15608                 if (tg3_flag(tp, 5755_PLUS))
15609                         features |= NETIF_F_IPV6_CSUM;
15610         }
15611
15612         /* TSO is on by default on chips that support hardware TSO.
15613          * Firmware TSO on older chips gives lower performance, so it
15614          * is off by default, but can be enabled using ethtool.
15615          */
15616         if ((tg3_flag(tp, HW_TSO_1) ||
15617              tg3_flag(tp, HW_TSO_2) ||
15618              tg3_flag(tp, HW_TSO_3)) &&
15619             (features & NETIF_F_IP_CSUM))
15620                 features |= NETIF_F_TSO;
15621         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15622                 if (features & NETIF_F_IPV6_CSUM)
15623                         features |= NETIF_F_TSO6;
15624                 if (tg3_flag(tp, HW_TSO_3) ||
15625                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15626                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15627                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15628                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15629                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15630                         features |= NETIF_F_TSO_ECN;
15631         }
15632
15633         dev->features |= features;
15634         dev->vlan_features |= features;
15635
15636         /*
15637          * Add loopback capability only for a subset of devices that support
15638          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15639          * loopback for the remaining devices.
15640          */
15641         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15642             !tg3_flag(tp, CPMU_PRESENT))
15643                 /* Add the loopback capability */
15644                 features |= NETIF_F_LOOPBACK;
15645
15646         dev->hw_features |= features;
15647
15648         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15649             !tg3_flag(tp, TSO_CAPABLE) &&
15650             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15651                 tg3_flag_set(tp, MAX_RXPEND_64);
15652                 tp->rx_pending = 63;
15653         }
15654
15655         err = tg3_get_device_address(tp);
15656         if (err) {
15657                 dev_err(&pdev->dev,
15658                         "Could not obtain valid ethernet address, aborting\n");
15659                 goto err_out_apeunmap;
15660         }
15661
15662         /*
15663          * Reset the chip in case a UNDI or EFI driver did not shut it down.
15664          * The DMA self test will enable the WDMAC and we would otherwise see
15665          * (spurious) pending DMA on the PCI bus at that point.
15666          */
15667         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15668             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15669                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15670                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15671         }
15672
15673         err = tg3_test_dma(tp);
15674         if (err) {
15675                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15676                 goto err_out_apeunmap;
15677         }
15678
15679         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15680         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15681         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15682         for (i = 0; i < tp->irq_max; i++) {
15683                 struct tg3_napi *tnapi = &tp->napi[i];
15684
15685                 tnapi->tp = tp;
15686                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15687
15688                 tnapi->int_mbox = intmbx;
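                /* The first five interrupt mailboxes are spaced 8 bytes
                 * apart; the remaining ones use a 4-byte stride.
                 */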
15689                 if (i <= 4)
15690                         intmbx += 0x8;
15691                 else
15692                         intmbx += 0x4;
15693
15694                 tnapi->consmbox = rcvmbx;
15695                 tnapi->prodmbox = sndmbx;
15696
15697                 if (i)
15698                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15699                 else
15700                         tnapi->coal_now = HOSTCC_MODE_NOW;
15701
15702                 if (!tg3_flag(tp, SUPPORT_MSIX))
15703                         break;
15704
15705                 /*
15706                  * If we support MSIX, we'll be using RSS.  If we're using
15707                  * RSS, the first vector only handles link interrupts and the
15708                  * remaining vectors handle rx and tx interrupts.  Reuse the
15709                  * mailbox values for the next iteration.  The values we setup
15710                  * above are still useful for the single vectored mode.
15711                  */
15712                 if (!i)
15713                         continue;
15714
15715                 rcvmbx += 0x8;
15716
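                /* The send producer mailboxes are not contiguous; step
                 * through them in the alternating -0x4/+0xc pattern the
                 * hardware layout appears to require.
                 */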
15717                 if (sndmbx & 0x4)
15718                         sndmbx -= 0x4;
15719                 else
15720                         sndmbx += 0xc;
15721         }
15722
15723         tg3_init_coal(tp);
15724
15725         pci_set_drvdata(pdev, dev);
15726
15727         if (tg3_flag(tp, 5717_PLUS)) {
15728                 /* Resume from a low-power mode */
15729                 tg3_frob_aux_power(tp, false);
15730         }
15731
15732         err = register_netdev(dev);
15733         if (err) {
15734                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15735                 goto err_out_apeunmap;
15736         }
15737
15738         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15739                     tp->board_part_number,
15740                     tp->pci_chip_rev_id,
15741                     tg3_bus_string(tp, str),
15742                     dev->dev_addr);
15743
15744         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15745                 struct phy_device *phydev;
15746                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15747                 netdev_info(dev,
15748                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15749                             phydev->drv->name, dev_name(&phydev->dev));
15750         } else {
15751                 char *ethtype;
15752
15753                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15754                         ethtype = "10/100Base-TX";
15755                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15756                         ethtype = "1000Base-SX";
15757                 else
15758                         ethtype = "10/100/1000Base-T";
15759
15760                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15761                             "(WireSpeed[%d], EEE[%d])\n",
15762                             tg3_phy_string(tp), ethtype,
15763                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15764                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15765         }
15766
15767         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15768                     (dev->features & NETIF_F_RXCSUM) != 0,
15769                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15770                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15771                     tg3_flag(tp, ENABLE_ASF) != 0,
15772                     tg3_flag(tp, TSO_CAPABLE) != 0);
15773         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15774                     tp->dma_rwctrl,
15775                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15776                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15777
15778         pci_save_state(pdev);
15779
15780         return 0;
15781
15782 err_out_apeunmap:
15783         if (tp->aperegs) {
15784                 iounmap(tp->aperegs);
15785                 tp->aperegs = NULL;
15786         }
15787
15788 err_out_iounmap:
15789         if (tp->regs) {
15790                 iounmap(tp->regs);
15791                 tp->regs = NULL;
15792         }
15793
15794 err_out_free_dev:
15795         free_netdev(dev);
15796
15797 err_out_power_down:
15798         pci_set_power_state(pdev, PCI_D3hot);
15799
15800 err_out_free_res:
15801         pci_release_regions(pdev);
15802
15803 err_out_disable_pdev:
15804         pci_disable_device(pdev);
15805         pci_set_drvdata(pdev, NULL);
15806         return err;
15807 }
15808
15809 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15810 {
15811         struct net_device *dev = pci_get_drvdata(pdev);
15812
15813         if (dev) {
15814                 struct tg3 *tp = netdev_priv(dev);
15815
15816                 if (tp->fw)
15817                         release_firmware(tp->fw);
15818
15819                 tg3_reset_task_cancel(tp);
15820
15821                 if (tg3_flag(tp, USE_PHYLIB)) {
15822                         tg3_phy_fini(tp);
15823                         tg3_mdio_fini(tp);
15824                 }
15825
15826                 unregister_netdev(dev);
15827                 if (tp->aperegs) {
15828                         iounmap(tp->aperegs);
15829                         tp->aperegs = NULL;
15830                 }
15831                 if (tp->regs) {
15832                         iounmap(tp->regs);
15833                         tp->regs = NULL;
15834                 }
15835                 free_netdev(dev);
15836                 pci_release_regions(pdev);
15837                 pci_disable_device(pdev);
15838                 pci_set_drvdata(pdev, NULL);
15839         }
15840 }
15841
15842 #ifdef CONFIG_PM_SLEEP
15843 static int tg3_suspend(struct device *device)
15844 {
15845         struct pci_dev *pdev = to_pci_dev(device);
15846         struct net_device *dev = pci_get_drvdata(pdev);
15847         struct tg3 *tp = netdev_priv(dev);
15848         int err;
15849
15850         if (!netif_running(dev))
15851                 return 0;
15852
15853         tg3_reset_task_cancel(tp);
15854         tg3_phy_stop(tp);
15855         tg3_netif_stop(tp);
15856
15857         del_timer_sync(&tp->timer);
15858
15859         tg3_full_lock(tp, 1);
15860         tg3_disable_ints(tp);
15861         tg3_full_unlock(tp);
15862
15863         netif_device_detach(dev);
15864
15865         tg3_full_lock(tp, 0);
15866         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15867         tg3_flag_clear(tp, INIT_COMPLETE);
15868         tg3_full_unlock(tp);
15869
15870         err = tg3_power_down_prepare(tp);
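        /* If power-down preparation failed, restart the hardware so the
         * interface is left in a usable state.
         */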
15871         if (err) {
15872                 int err2;
15873
15874                 tg3_full_lock(tp, 0);
15875
15876                 tg3_flag_set(tp, INIT_COMPLETE);
15877                 err2 = tg3_restart_hw(tp, 1);
15878                 if (err2)
15879                         goto out;
15880
15881                 tp->timer.expires = jiffies + tp->timer_offset;
15882                 add_timer(&tp->timer);
15883
15884                 netif_device_attach(dev);
15885                 tg3_netif_start(tp);
15886
15887 out:
15888                 tg3_full_unlock(tp);
15889
15890                 if (!err2)
15891                         tg3_phy_start(tp);
15892         }
15893
15894         return err;
15895 }
15896
15897 static int tg3_resume(struct device *device)
15898 {
15899         struct pci_dev *pdev = to_pci_dev(device);
15900         struct net_device *dev = pci_get_drvdata(pdev);
15901         struct tg3 *tp = netdev_priv(dev);
15902         int err;
15903
15904         if (!netif_running(dev))
15905                 return 0;
15906
15907         netif_device_attach(dev);
15908
15909         tg3_full_lock(tp, 0);
15910
15911         tg3_flag_set(tp, INIT_COMPLETE);
15912         err = tg3_restart_hw(tp, 1);
15913         if (err)
15914                 goto out;
15915
15916         tp->timer.expires = jiffies + tp->timer_offset;
15917         add_timer(&tp->timer);
15918
15919         tg3_netif_start(tp);
15920
15921 out:
15922         tg3_full_unlock(tp);
15923
15924         if (!err)
15925                 tg3_phy_start(tp);
15926
15927         return err;
15928 }
15929
15930 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15931 #define TG3_PM_OPS (&tg3_pm_ops)
15932
15933 #else
15934
15935 #define TG3_PM_OPS NULL
15936
15937 #endif /* CONFIG_PM_SLEEP */
15938
15939 /**
15940  * tg3_io_error_detected - called when PCI error is detected
15941  * @pdev: Pointer to PCI device
15942  * @state: The current PCI connection state
15943  *
15944  * This function is called after a PCI bus error affecting
15945  * this device has been detected.
15946  */
15947 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15948                                               pci_channel_state_t state)
15949 {
15950         struct net_device *netdev = pci_get_drvdata(pdev);
15951         struct tg3 *tp = netdev_priv(netdev);
15952         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15953
15954         netdev_info(netdev, "PCI I/O error detected\n");
15955
15956         rtnl_lock();
15957
15958         if (!netif_running(netdev))
15959                 goto done;
15960
15961         tg3_phy_stop(tp);
15962
15963         tg3_netif_stop(tp);
15964
15965         del_timer_sync(&tp->timer);
15966
15967         /* Want to make sure that the reset task doesn't run */
15968         tg3_reset_task_cancel(tp);
15969         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15970
15971         netif_device_detach(netdev);
15972
15973         /* Clean up software state, even if MMIO is blocked */
15974         tg3_full_lock(tp, 0);
15975         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15976         tg3_full_unlock(tp);
15977
15978 done:
15979         if (state == pci_channel_io_perm_failure)
15980                 err = PCI_ERS_RESULT_DISCONNECT;
15981         else
15982                 pci_disable_device(pdev);
15983
15984         rtnl_unlock();
15985
15986         return err;
15987 }
15988
15989 /**
15990  * tg3_io_slot_reset - called after the PCI bus has been reset.
15991  * @pdev: Pointer to PCI device
15992  *
15993  * Restart the card from scratch, as if from a cold-boot.
15994  * At this point, the card has experienced a hard reset,
15995  * followed by fixups by BIOS, and has its config space
15996  * set up identically to what it was at cold boot.
15997  */
15998 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15999 {
16000         struct net_device *netdev = pci_get_drvdata(pdev);
16001         struct tg3 *tp = netdev_priv(netdev);
16002         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16003         int err;
16004
16005         rtnl_lock();
16006
16007         if (pci_enable_device(pdev)) {
16008                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16009                 goto done;
16010         }
16011
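        /* Re-apply the config space saved earlier (at probe or after a
         * prior recovery), then snapshot it again so a later recovery
         * starts from this same state.
         */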
16012         pci_set_master(pdev);
16013         pci_restore_state(pdev);
16014         pci_save_state(pdev);
16015
16016         if (!netif_running(netdev)) {
16017                 rc = PCI_ERS_RESULT_RECOVERED;
16018                 goto done;
16019         }
16020
16021         err = tg3_power_up(tp);
16022         if (err)
16023                 goto done;
16024
16025         rc = PCI_ERS_RESULT_RECOVERED;
16026
16027 done:
16028         rtnl_unlock();
16029
16030         return rc;
16031 }
16032
16033 /**
16034  * tg3_io_resume - called when traffic can start flowing again.
16035  * @pdev: Pointer to PCI device
16036  *
16037  * This callback is called when the error recovery driver tells
16038  * us that it's OK to resume normal operation.
16039  */
16040 static void tg3_io_resume(struct pci_dev *pdev)
16041 {
16042         struct net_device *netdev = pci_get_drvdata(pdev);
16043         struct tg3 *tp = netdev_priv(netdev);
16044         int err;
16045
16046         rtnl_lock();
16047
16048         if (!netif_running(netdev))
16049                 goto done;
16050
16051         tg3_full_lock(tp, 0);
16052         tg3_flag_set(tp, INIT_COMPLETE);
16053         err = tg3_restart_hw(tp, 1);
16054         tg3_full_unlock(tp);
16055         if (err) {
16056                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16057                 goto done;
16058         }
16059
16060         netif_device_attach(netdev);
16061
16062         tp->timer.expires = jiffies + tp->timer_offset;
16063         add_timer(&tp->timer);
16064
16065         tg3_netif_start(tp);
16066
16067         tg3_phy_start(tp);
16068
16069 done:
16070         rtnl_unlock();
16071 }
16072
16073 static struct pci_error_handlers tg3_err_handler = {
16074         .error_detected = tg3_io_error_detected,
16075         .slot_reset     = tg3_io_slot_reset,
16076         .resume         = tg3_io_resume
16077 };
16078
16079 static struct pci_driver tg3_driver = {
16080         .name           = DRV_MODULE_NAME,
16081         .id_table       = tg3_pci_tbl,
16082         .probe          = tg3_init_one,
16083         .remove         = __devexit_p(tg3_remove_one),
16084         .err_handler    = &tg3_err_handler,
16085         .driver.pm      = TG3_PM_OPS,
16086 };
16087
16088 static int __init tg3_init(void)
16089 {
16090         return pci_register_driver(&tg3_driver);
16091 }
16092
16093 static void __exit tg3_cleanup(void)
16094 {
16095         pci_unregister_driver(&tg3_driver);
16096 }
16097
16098 module_init(tg3_init);
16099 module_exit(tg3_cleanup);