tg3: Track LP advertising
[cascardo/linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Test whether driver feature @flag is set in the bitmap @bits.
 * Thin type-checked wrapper around test_bit(); used only through the
 * tg3_flag() macro, which passes (tp)->tg3_flags as the bitmap.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
72
/* Set driver feature @flag in the bitmap @bits (atomic set_bit()).
 * Used only through the tg3_flag_set() macro.
 */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
77
/* Clear driver feature @flag in the bitmap @bits (atomic clear_bit()).
 * Used only through the tg3_flag_clear() macro.
 */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     121
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "November 2, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138 #define TG3_RSS_INDIR_TBL_SIZE          128
139
140 /* Do not place this n-ring entries value into the tp struct itself,
141  * we really want to expose these constants to GCC so that modulo et
142  * al.  operations are done with shifts and masks instead of with
143  * hw multiply/modulo instructions.  Another solution would be to
144  * replace things like '% foo' with '& (foo - 1)'.
145  */
146
147 #define TG3_TX_RING_SIZE                512
148 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
149
150 #define TG3_RX_STD_RING_BYTES(tp) \
151         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
157                                  TG3_TX_RING_SIZE)
158 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
160 #define TG3_DMA_BYTE_ENAB               64
161
162 #define TG3_RX_STD_DMA_SZ               1536
163 #define TG3_RX_JMB_DMA_SZ               9046
164
165 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
166
167 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
175
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177  * that are at least dword aligned when used in PCIX mode.  The driver
178  * works around this bug by double copying the packet.  This workaround
179  * is built into the normal double copy length check for efficiency.
180  *
181  * However, the double copy is only necessary on those architectures
182  * where unaligned memory accesses are inefficient.  For those architectures
183  * where unaligned memory accesses incur little penalty, we can reintegrate
184  * the 5701 in the normal rx path.  Doing so saves a device structure
185  * dereference by hardcoding the double copy threshold in place.
186  */
187 #define TG3_RX_COPY_THRESHOLD           256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
190 #else
191         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
192 #endif
193
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
196 #else
197 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
198 #endif
199
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX               4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
212 static char version[] __devinitdata =
213         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309         {}
310 };
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
314 static const struct {
315         const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317         { "rx_octets" },
318         { "rx_fragments" },
319         { "rx_ucast_packets" },
320         { "rx_mcast_packets" },
321         { "rx_bcast_packets" },
322         { "rx_fcs_errors" },
323         { "rx_align_errors" },
324         { "rx_xon_pause_rcvd" },
325         { "rx_xoff_pause_rcvd" },
326         { "rx_mac_ctrl_rcvd" },
327         { "rx_xoff_entered" },
328         { "rx_frame_too_long_errors" },
329         { "rx_jabbers" },
330         { "rx_undersize_packets" },
331         { "rx_in_length_errors" },
332         { "rx_out_length_errors" },
333         { "rx_64_or_less_octet_packets" },
334         { "rx_65_to_127_octet_packets" },
335         { "rx_128_to_255_octet_packets" },
336         { "rx_256_to_511_octet_packets" },
337         { "rx_512_to_1023_octet_packets" },
338         { "rx_1024_to_1522_octet_packets" },
339         { "rx_1523_to_2047_octet_packets" },
340         { "rx_2048_to_4095_octet_packets" },
341         { "rx_4096_to_8191_octet_packets" },
342         { "rx_8192_to_9022_octet_packets" },
343
344         { "tx_octets" },
345         { "tx_collisions" },
346
347         { "tx_xon_sent" },
348         { "tx_xoff_sent" },
349         { "tx_flow_control" },
350         { "tx_mac_errors" },
351         { "tx_single_collisions" },
352         { "tx_mult_collisions" },
353         { "tx_deferred" },
354         { "tx_excessive_collisions" },
355         { "tx_late_collisions" },
356         { "tx_collide_2times" },
357         { "tx_collide_3times" },
358         { "tx_collide_4times" },
359         { "tx_collide_5times" },
360         { "tx_collide_6times" },
361         { "tx_collide_7times" },
362         { "tx_collide_8times" },
363         { "tx_collide_9times" },
364         { "tx_collide_10times" },
365         { "tx_collide_11times" },
366         { "tx_collide_12times" },
367         { "tx_collide_13times" },
368         { "tx_collide_14times" },
369         { "tx_collide_15times" },
370         { "tx_ucast_packets" },
371         { "tx_mcast_packets" },
372         { "tx_bcast_packets" },
373         { "tx_carrier_sense_errors" },
374         { "tx_discards" },
375         { "tx_errors" },
376
377         { "dma_writeq_full" },
378         { "dma_write_prioq_full" },
379         { "rxbds_empty" },
380         { "rx_discards" },
381         { "rx_errors" },
382         { "rx_threshold_hit" },
383
384         { "dma_readq_full" },
385         { "dma_read_prioq_full" },
386         { "tx_comp_queue_full" },
387
388         { "ring_set_send_prod_index" },
389         { "ring_status_update" },
390         { "nic_irqs" },
391         { "nic_avoided_irqs" },
392         { "nic_tx_threshold_hit" },
393
394         { "mbuf_lwm_thresh_hit" },
395 };
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
400 static const struct {
401         const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403         { "nvram test        (online) " },
404         { "link test         (online) " },
405         { "register test     (offline)" },
406         { "memory test       (offline)" },
407         { "mac loopback test (offline)" },
408         { "phy loopback test (offline)" },
409         { "ext loopback test (offline)" },
410         { "interrupt test    (offline)" },
411 };
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Write @val to the memory-mapped device register at offset @off.
 * Plain posted write — no read-back flush; see tg3_write_flush_reg32()
 * and _tw32_flush() for the flushing variants.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
420
/* Read the memory-mapped device register at offset @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
425
/* Write @val to APE (Application Processing Engine) register @off,
 * via the separate APE register BAR mapping (tp->aperegs).
 */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
430
/* Read APE register @off via the APE register BAR mapping (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
435
/* Write device register @off with @val through PCI config space
 * ("indirect" register access mode): the target offset is loaded into
 * TG3PCI_REG_BASE_ADDR and the value written through TG3PCI_REG_DATA.
 * indirect_lock makes the two config-space accesses atomic with respect
 * to every other indirect register/mailbox/memory access in this file.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* Write @val to register @off, then read it back so the posted write is
 * flushed to the device before this function returns.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
451
/* Read device register @off through PCI config space (indirect mode).
 * Counterpart of tg3_write_indirect_reg32(); indirect_lock keeps the
 * base-address write and the data read atomic as a pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
463
/* Write mailbox register @off with @val while in indirect access mode.
 *
 * Two mailboxes have dedicated shadow registers in PCI config space and
 * are written directly (no lock needed for a single config write):
 * the RX return ring 0 consumer index and the standard RX producer
 * index.  All other mailboxes live at offset + 0x5600 in the indirect
 * register window and go through the locked BASE_ADDR/DATA pair.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        /* 0x5600 maps mailbox offsets into the indirect register space. */
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
493
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496         unsigned long flags;
497         u32 val;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503         return val;
504 }
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* Write register @off with @val and wait @usec_wait microseconds.
 *
 * Hardware with the PCIX target or ICH workaround flags requires the
 * non-posted write path (tp->write32, indirect mode); otherwise a posted
 * MMIO write is used and flushed with a read-back.  The delay is applied
 * both before and after the flushing read on the posted path so the full
 * wait time is guaranteed in either case (see the header comment above
 * about GRC_LOCAL_CTRL / TG3PCI_CLOCK_CTRL).
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
529
/* Write mailbox @off with @val and flush with a read-back, unless the
 * chip either reorders mailbox writes itself (MBOX_WRITE_REORDER — the
 * write path already flushes) or the ICH workaround forbids the read.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
536
/* Write the TX mailbox at @off with @val.
 * On chips with the TXD_MBOX_HWBUG flag the value is written twice
 * (hardware bug workaround); on chips with MBOX_WRITE_REORDER the
 * mailbox is read back to force write ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}
546
/* 5906-specific mailbox read: mailboxes are accessed through the GRC
 * mailbox window (GRCMBOX_BASE) rather than the normal mailbox region.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906-specific mailbox write through the GRC mailbox window
 * (GRCMBOX_BASE); counterpart of tg3_read32_mbox_5906().
 */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window registers.
 *
 * On 5906 parts, writes into the statistics block region are silently
 * dropped (presumably that window is not writable on this chip —
 * NOTE(review): rationale not visible here, confirm against errata).
 * Depending on SRAM_USE_CONFIG, the window is driven either through PCI
 * config space or through MMIO; indirect_lock serializes the window
 * base/data sequence, and the base address is always restored to zero
 * afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC on-chip SRAM at offset @off into *@val through the memory
 * window registers.  Counterpart of tg3_write_mem().
 *
 * On 5906 parts, reads from the statistics block region return 0
 * instead of touching the hardware (mirrors the write-side skip in
 * tg3_write_mem()).  The window base address is always restored to
 * zero before the lock is dropped.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire the APE hardware semaphore @locknum.
 *
 * Writes the per-function request bit to the lock's request register,
 * then polls the grant register for up to ~1 ms (100 x 10 us).  On
 * timeout the request is revoked and -EBUSY returned.  Returns 0 when
 * the APE is not enabled (nothing to lock) or on success.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; nothing to acquire. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fallthrough - GPIO uses the same request bit scheme */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the shared REQ/GRANT registers; later chips have
         * per-lock register banks.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
702
/* Release the APE hardware semaphore @locknum.
 *
 * Writing the owning function's bit to the grant register releases the
 * lock.  No-op when the APE is not enabled, for unknown lock numbers,
 * or for the GPIO lock on 5761 (which has none — mirrors tg3_ape_lock()).
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fallthrough - GPIO uses the same grant bit scheme */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post event @event to the APE firmware.
 *
 * Bails out silently when the APE does not accept events: NCSI firmware,
 * missing segment signature, or firmware not ready.  Otherwise waits up
 * to ~1 ms (10 x 100 us) for the previous event to be consumed — each
 * poll takes the MEM lock, writes the new event only if none is pending,
 * then drops the lock — and finally rings the APE doorbell (TG3_APE_EVENT).
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Only ring the doorbell if our event actually got queued. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Tell the APE firmware about a driver state transition (@kind is one
 * of the RESET_KIND_* constants) and send the matching APE event.
 *
 * INIT publishes the host segment signature/length, bumps the init
 * counter, and records the driver version and START state.  SHUTDOWN
 * wipes the host segment signature (the APE then assumes OS-absent)
 * and records either WOL or UNLOAD state depending on wake-up config.
 * SUSPEND only sends the suspend event.  No-op without APE support.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}
829
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832         int i;
833
834         tw32(TG3PCI_MISC_HOST_CTRL,
835              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836         for (i = 0; i < tp->irq_max; i++)
837                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839
/* Re-enable chip interrupts after tg3_disable_ints()/irq_sync.
 *
 * Clears irq_sync, unmasks PCI interrupts, acks each vector's mailbox
 * with its last status tag, and then forces an initial interrupt so
 * that any status update which raced with the disabled window is not
 * lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* irq_sync must be visible before interrupts are unmasked */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* Mailbox is written a second time in one-shot MSI mode.
		 * NOTE(review): presumably a chip workaround -- confirm
		 * against the programming guide before changing.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' coalesce bits from coal_now. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack completed work by writing the last status tag back to the
	 * interrupt mailbox; mmiowb() orders this MMIO write with
	 * respect to writes issued from other CPUs.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Switch the core clock selection in TG3PCI_CLOCK_CTRL back to the
 * default, preserving only the CLKRUN bits and the low-order field.
 *
 * No-op on CPMU-equipped chips and on the 5780 class.  When an
 * alternate core clock (625 MHz or 44 MHz) was selected, the change
 * is staged through intermediate register writes before the final
 * value is written -- the write order matters, do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN control bits and low nybble+1. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step handoff: 44 MHz + ALTCLK first, then ALTCLK
		 * alone, before the final write below drops ALTCLK.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
/* Upper bound on MI busy-poll iterations (10 us each). */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg through the MAC's MI (MDIO) interface.
 *
 * MI autopolling is temporarily disabled around the manual access and
 * restored afterwards.  On success the 16-bit register value is
 * stored in *val and 0 is returned; if the MI interface stays busy
 * for the full PHY_BUSY_LOOPS budget, *val is left 0 and -EBUSY is
 * returned.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Suspend autopolling so it cannot race our manual MI access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy dropped; re-read to pick up the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
994
/* Write @val to PHY register @reg through the MAC's MI (MDIO)
 * interface.
 *
 * On FET-class PHYs, writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are
 * quietly skipped (those registers are not applicable there) and 0 is
 * returned.  MI autopolling is suspended around the access.  Returns
 * 0 on success, -EBUSY if the MI interface never goes idle within the
 * PHY_BUSY_LOOPS polling budget.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Suspend autopolling so it cannot race our manual MI access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable/disable the PHY's SMDSP via the AUXCTL shadow register,
 * keeping the TX amplitude at the 6 dB setting.  Both expand to a
 * tg3_phy_auxctl_write() call and yield its int result.
 *
 * Note: no trailing semicolon in the expansion.  The DISABLE variant
 * previously ended in ';', which silently broke brace-less if/else
 * usage and made it impossible to consume the return value as an
 * expression, unlike its ENABLE counterpart.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Reset the PHY by setting BMCR_RESET and polling for the
 * self-clearing reset bit to drop (up to 5000 * 10 us = ~50 ms).
 * Returns 0 on success, -EBUSY on any MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is exactly -1 here only when the loop ran to exhaustion
	 * (the post-decrement leaves it at -1 after the final test).
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* mii_bus reset callback: no bus-level reset is needed here, so this
 * always reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1207
/* Program the 5785 MAC's PHY configuration registers (MAC_PHYCFG1/2
 * and MAC_EXT_RGMII_MODE) with LED modes and RGMII in-band signalling
 * settings matched to the attached PHY type.  PHYs not in the switch
 * below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	/* Pick LED modes by PHY model. */
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only the LED modes and clock
	 * timeouts; no in-band configuration applies.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII path: enable in-band status unless disabled by flag. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally the external RGMII mode bits, mirroring the external
	 * in-band RX/TX enable flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Turn off MI autopolling (manual MDIO accesses need it disabled) and,
 * if the mdio bus is already registered on a 5785, reapply the
 * PHY-specific MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);	/* same delay used after every MAC_MI_MODE update */

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1299
/* Determine the PHY address and, when phylib is in use, allocate and
 * register the mdio bus, then apply per-PHY quirk flags.
 *
 * Returns 0 on success (including the non-phylib / already-inited
 * early exit), -ENOMEM if the bus cannot be allocated, -ENODEV if no
 * usable PHY is found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices: PHY address follows the PCI function
		 * number; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id is derived from the PCI bus/devfn so it is unique
	 * per device.  Only the TG3_PHY_MII_ADDR slot is scanned.
	 */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply interface mode and quirk flags per PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
/* Budget the firmware gets to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT (i.e. has
 * acked the previous driver event), bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC measured from last_event_jiffies.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	/* Poll in ~8 us steps until the event bit drops or the budget
	 * computed above is exhausted.
	 */
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1454
/* tp->lock is held. */
/* Push the current link state to the management firmware (5780-class
 * chips with ASF enabled only).  Sends FWCMD_NICDRV_LINK_UPDATE with
 * 14 bytes of data: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 and
 * MII_PHYADDR, with each register pair packed high/low into one
 * 32-bit word.  Unreadable registers are reported as zero.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status; zero for MII serdes. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1501
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX cpu to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX cpu to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
/* Wait for the chip's firmware/bootcode to finish initializing.
 *
 * 5906: polls VCPU_STATUS for INIT_DONE, up to 200 * 100 us = 20 ms,
 * returning -ENODEV on timeout.  All other chips: polls the firmware
 * mailbox for the complemented magic value, up to 100000 * 10 us =
 * ~1 s; timing out there is NOT an error (see the comment below) and
 * 0 is returned.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1644
/* Log the current link state (up/down, speed, duplex, flow control and,
 * when the PHY is EEE-capable, EEE state) and forward the link event to
 * tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		/* setlpicnt is non-zero only when EEE negotiation resolved
		 * (see tg3_phy_eee_adjust()).
		 */
		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1672
1673 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_PAUSE_CAP;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_PAUSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712                 if (lcladv & ADVERTISE_1000XPAUSE)
1713                         cap = FLOW_CTRL_RX;
1714                 if (rmtadv & ADVERTISE_1000XPAUSE)
1715                         cap = FLOW_CTRL_TX;
1716         }
1717
1718         return cap;
1719 }
1720
/* Resolve the flow control settings for the current link and program
 * the MAC_RX_MODE / MAC_TX_MODE registers to match.
 *
 * @lcladv, @rmtadv: local and link-partner pause advertisement bits;
 * consulted only when pause autonegotiation is enabled.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* When phylib owns the PHY, the autoneg setting lives in the
	 * phy_device rather than in link_config.
	 */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links resolve from 1000BASE-X pause bits;
		 * copper uses the generic MII helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only write the register when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1759
/* phylib adjust_link callback (registered via phy_connect() in
 * tg3_phy_init()).  Runs whenever the PHY state changes: reprograms the
 * MAC port mode, duplex, flow control and timing registers to match the
 * PHY, then emits a link report if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the PHY speed.  On
		 * non-5785 chips, any speed other than 10/100 gets GMII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's pause bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a longer slot time (0xff vs 0x20). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only on a real transition: link came up or went down,
	 * or speed/duplex/flow control changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1843
/* Attach the MAC to its PHY through phylib and mask the PHY's supported
 * feature set down to what the MAC can do.  Idempotent: returns 0
 * immediately if already connected.
 *
 * Returns 0 on success or a negative errno (PTR_ERR from phy_connect,
 * or -EINVAL for an unsupported PHY interface mode).
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything we support. */
	phydev->advertising = phydev->supported;

	return 0;
}
1891
/* (Re)start the PHY state machine.  If the device was in low power,
 * restore the pre-suspend link settings saved in link_config.orig_*
 * before starting, then kick off autonegotiation.  No-op when phylib
 * is not attached.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1913
1914 static void tg3_phy_stop(struct tg3 *tp)
1915 {
1916         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1917                 return;
1918
1919         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920 }
1921
1922 static void tg3_phy_fini(struct tg3 *tp)
1923 {
1924         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1927         }
1928 }
1929
1930 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1931 {
1932         int err;
1933         u32 val;
1934
1935         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1936                 return 0;
1937
1938         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1939                 /* Cannot do read-modify-write on 5401 */
1940                 err = tg3_phy_auxctl_write(tp,
1941                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1942                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1943                                            0x4c20);
1944                 goto done;
1945         }
1946
1947         err = tg3_phy_auxctl_read(tp,
1948                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1949         if (err)
1950                 return err;
1951
1952         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1953         err = tg3_phy_auxctl_write(tp,
1954                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1955
1956 done:
1957         return err;
1958 }
1959
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1961 {
1962         u32 phytest;
1963
1964         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1965                 u32 phy;
1966
1967                 tg3_writephy(tp, MII_TG3_FET_TEST,
1968                              phytest | MII_TG3_FET_SHADOW_EN);
1969                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970                         if (enable)
1971                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972                         else
1973                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975                 }
1976                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1977         }
1978 }
1979
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers.  Skipped entirely on pre-5705 chips and on
 * 5717+ parts with an MII serdes PHY; FET PHYs use their own shadow
 * register layout (tg3_phy_fet_toggle_apd).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program the SCR5 power-saving selections. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL APD is set except when enabling APD on a 5784. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD wake timer and enable bit. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2014
/* Enable or disable automatic MDI/MDI-X crossover detection.  Only
 * applies to 5705+ copper PHYs; FET PHYs use the shadow window behind
 * MII_TG3_FET_TEST, others use the auxctl misc shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Close the shadow window. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2055
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2057 {
2058         int ret;
2059         u32 val;
2060
2061         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2062                 return;
2063
2064         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065         if (!ret)
2066                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2068 }
2069
/* Program PHY DSP coefficients from the chip's OTP (one-time
 * programmable) word, tp->phy_otp.  Each bit-field of the OTP value is
 * extracted and written to the corresponding DSP register.  No-op if
 * no OTP value was read, or if enabling SMDSP access fails.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Non-zero return means SMDSP access could not be enabled. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2106
/* Recompute EEE (Energy Efficient Ethernet) state after a link change.
 *
 * When the link is up at 100/1000 full duplex with autoneg, program the
 * EEE exit timer and check (via clause-45 register access) whether the
 * link partner resolved EEE; tp->setlpicnt is set to 2 when it did.
 * Otherwise LPI is disabled in the CPMU EEE mode register.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* Clear the DSP TAP26 register, then turn LPI off. */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2149
/* Enable LPI (low power idle) in the CPMU EEE mode register.  On
 * 5717/5719/57765 gigabit links, first program the DSP TAP26 workaround
 * bits through the SMDSP window.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2168
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2170 {
2171         int limit = 100;
2172
2173         while (limit--) {
2174                 u32 tmp32;
2175
2176                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177                         if ((tmp32 & 0x1000) == 0)
2178                                 break;
2179                 }
2180         }
2181         if (limit < 0)
2182                 return -EBUSY;
2183
2184         return 0;
2185 }
2186
/* Write a known test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On a macro-engine timeout, *resetp is
 * set to 1 to ask the caller (tg3_phy_reset_5703_4_5) to reset the PHY
 * and retry.  Returns 0 if every channel verified, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's test memory and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro engine. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare each (low, high) pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP error sequence
				 * and report failure (no retry requested).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2252
2253 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2254 {
2255         int chan;
2256
2257         for (chan = 0; chan < 4; chan++) {
2258                 int i;
2259
2260                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2261                              (chan * 0x2000) | 0x0200);
2262                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2263                 for (i = 0; i < 6; i++)
2264                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2265                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2266                 if (tg3_wait_macro_done(tp))
2267                         return -EBUSY;
2268         }
2269
2270         return 0;
2271 }
2272
2273 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2274 {
2275         u32 reg32, phy9_orig;
2276         int retries, do_phy_reset, err;
2277
2278         retries = 10;
2279         do_phy_reset = 1;
2280         do {
2281                 if (do_phy_reset) {
2282                         err = tg3_bmcr_reset(tp);
2283                         if (err)
2284                                 return err;
2285                         do_phy_reset = 0;
2286                 }
2287
2288                 /* Disable transmitter and interrupt.  */
2289                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2290                         continue;
2291
2292                 reg32 |= 0x3000;
2293                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2294
2295                 /* Set full-duplex, 1000 mbps.  */
2296                 tg3_writephy(tp, MII_BMCR,
2297                              BMCR_FULLDPLX | BMCR_SPEED1000);
2298
2299                 /* Set to master mode.  */
2300                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2301                         continue;
2302
2303                 tg3_writephy(tp, MII_CTRL1000,
2304                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2305
2306                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2307                 if (err)
2308                         return err;
2309
2310                 /* Block the PHY control access.  */
2311                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2312
2313                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2314                 if (!err)
2315                         break;
2316         } while (--retries);
2317
2318         err = tg3_phy_reset_chanpat(tp);
2319         if (err)
2320                 return err;
2321
2322         tg3_phydsp_write(tp, 0x8005, 0x0000);
2323
2324         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2325         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2326
2327         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2328
2329         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2330
2331         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2332                 reg32 &= ~0x3000;
2333                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2334         } else if (!err)
2335                 err = -EBUSY;
2336
2337         return err;
2338 }
2339
/* Reset the tigon3 PHY and reapply all chip-specific PHY workarounds
 * (OTP coefficients, APD, DSP fixups, jumbo-frame bits, MDI-X,
 * wirespeed).  NOTE(review): the old comment here referred to a FORCE
 * argument that this function does not take; it always resets.
 *
 * Returns 0 on success or a negative errno on PHY access failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* Bring the 5906 internal EPHY out of IDDQ before touching it. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice -- presumably to flush latched status bits;
	 * confirm against MII spec.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Declare the link down across the reset. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 use the dedicated reset-and-verify sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily clear GPHY 10MB RX-only mode
	 * around the reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784/5761 AX revs: clear the 12.5MHz gigabit MAC clock mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP fixups, gated on phy_flags set at probe time. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2480
2481 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2483 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2484                                           TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489          (TG3_GPIO_MSG_DRVR_PRES << 12))
2490
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495          (TG3_GPIO_MSG_NEED_VAUX << 12))
2496
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2498 {
2499         u32 status, shift;
2500
2501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504         else
2505                 status = tr32(TG3_CPMU_DRV_STATUS);
2506
2507         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508         status &= ~(TG3_GPIO_MSG_MASK << shift);
2509         status |= (newstat << shift);
2510
2511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514         else
2515                 tw32(TG3_CPMU_DRV_STATUS, status);
2516
2517         return status >> TG3_APE_GPIO_MSG_SHIFT;
2518 }
2519
2520 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2521 {
2522         if (!tg3_flag(tp, IS_NIC))
2523                 return 0;
2524
2525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2528                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2529                         return -EIO;
2530
2531                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2532
2533                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2535
2536                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2537         } else {
2538                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2539                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2540         }
2541
2542         return 0;
2543 }
2544
2545 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2546 {
2547         u32 grc_local_ctrl;
2548
2549         if (!tg3_flag(tp, IS_NIC) ||
2550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2552                 return;
2553
2554         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2555
2556         tw32_wait_f(GRC_LOCAL_CTRL,
2557                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2558                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2559
2560         tw32_wait_f(GRC_LOCAL_CTRL,
2561                     grc_local_ctrl,
2562                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2563
2564         tw32_wait_f(GRC_LOCAL_CTRL,
2565                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 }
2568
/* Switch the device's standby power source to Vaux so it can stay
 * powered (callers do this when WOL or ASF needs the device alive).
 * The GPIO write sequence that flips the switch differs per chip
 * family; the writes below are order-sensitive.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: one write enabling GPIO0-2 outputs with
		 * GPIO0/1 driven high does the whole job.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO2 ... */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* ... then drop GPIO0 to complete the sequence. */
		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		/* Enable GPIO0-2 outputs, GPIO1/2 high (GPIO2 bits are
		 * masked back out when the NVRAM config forbids it).
		 */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Next, raise GPIO0 ... */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* ... and finally drop GPIO2 again where it is usable. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2645
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2647 {
2648         u32 msg = 0;
2649
2650         /* Serialize power state transitions */
2651         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2652                 return;
2653
2654         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655                 msg = TG3_GPIO_MSG_NEED_VAUX;
2656
2657         msg = tg3_set_function_status(tp, msg);
2658
2659         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2660                 goto done;
2661
2662         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663                 tg3_pwrsrc_switch_to_vaux(tp);
2664         else
2665                 tg3_pwrsrc_die_with_vmain(tp);
2666
2667 done:
2668         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2669 }
2670
/* Choose the device's standby power source.  If this function (or a
 * peer device sharing the power circuitry) needs standby power for
 * WOL and/or ASF, switch to Vaux; otherwise fall back to Vmain.
 * include_wol controls whether WOL counts toward needing Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* 5717-class chips arbitrate across PCI functions via the APE. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer is still initialized; leave the power
			 * source alone.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2715
2716 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2717 {
2718         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2719                 return 1;
2720         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2721                 if (speed != SPEED_10)
2722                         return 1;
2723         } else if (speed == SPEED_10)
2724                 return 1;
2725
2726         return 0;
2727 }
2728
2729 static int tg3_setup_phy(struct tg3 *, int);
2730 static int tg3_halt_cpu(struct tg3 *, u32);
2731
/* Drive the PHY toward its low-power state before the device powers
 * down.  do_low_power selects the deeper register-driven sequence;
 * some chip/PHY combinations must not be fully powered down because
 * of hardware bugs and only get a partial shutdown here.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* Serdes PHYs: nothing to do except on 5704, where the
		 * SG_DIG block is soft-reset and bit 15 of SERDES_CFG set.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the ePHY into IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Clear advertisement and restart autoneg ... */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* ... then set the standby-power-down bit via
			 * the AUXMODE4 shadow register, restoring the
			 * TEST register afterward.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force the LEDs off and enter the low-power / isolate
		 * state through the aux-control power register.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000Mb MAC clock to 12.5MHz first. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2804
2805 /* tp->lock is held. */
2806 static int tg3_nvram_lock(struct tg3 *tp)
2807 {
2808         if (tg3_flag(tp, NVRAM)) {
2809                 int i;
2810
2811                 if (tp->nvram_lock_cnt == 0) {
2812                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2813                         for (i = 0; i < 8000; i++) {
2814                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2815                                         break;
2816                                 udelay(20);
2817                         }
2818                         if (i == 8000) {
2819                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2820                                 return -ENODEV;
2821                         }
2822                 }
2823                 tp->nvram_lock_cnt++;
2824         }
2825         return 0;
2826 }
2827
2828 /* tp->lock is held. */
2829 static void tg3_nvram_unlock(struct tg3 *tp)
2830 {
2831         if (tg3_flag(tp, NVRAM)) {
2832                 if (tp->nvram_lock_cnt > 0)
2833                         tp->nvram_lock_cnt--;
2834                 if (tp->nvram_lock_cnt == 0)
2835                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_enable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2846         }
2847 }
2848
2849 /* tp->lock is held. */
2850 static void tg3_disable_nvram_access(struct tg3 *tp)
2851 {
2852         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2853                 u32 nvaccess = tr32(NVRAM_ACCESS);
2854
2855                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2856         }
2857 }
2858
/* Read one 32-bit word from the legacy serial EEPROM through the GRC
 * EEPROM address/data registers.  offset must be 4-byte aligned and
 * within EEPROM_ADDR_ADDR_MASK.  Returns 0 on success with *val set,
 * -EINVAL on a bad offset, -EBUSY if the read never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated register bits; clear the address, device
	 * id and read-flag fields before programming the new read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1000 * 1ms. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2898
2899 #define NVRAM_CMD_TIMEOUT 10000
2900
2901 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2902 {
2903         int i;
2904
2905         tw32(NVRAM_CMD, nvram_cmd);
2906         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2907                 udelay(10);
2908                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2909                         udelay(10);
2910                         break;
2911                 }
2912         }
2913
2914         if (i == NVRAM_CMD_TIMEOUT)
2915                 return -EBUSY;
2916
2917         return 0;
2918 }
2919
2920 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2921 {
2922         if (tg3_flag(tp, NVRAM) &&
2923             tg3_flag(tp, NVRAM_BUFFERED) &&
2924             tg3_flag(tp, FLASH) &&
2925             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2926             (tp->nvram_jedecnum == JEDEC_ATMEL))
2927
2928                 addr = ((addr / tp->nvram_pagesize) <<
2929                         ATMEL_AT45DB0X1B_PAGE_POS) +
2930                        (addr % tp->nvram_pagesize);
2931
2932         return addr;
2933 }
2934
2935 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2936 {
2937         if (tg3_flag(tp, NVRAM) &&
2938             tg3_flag(tp, NVRAM_BUFFERED) &&
2939             tg3_flag(tp, FLASH) &&
2940             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2941             (tp->nvram_jedecnum == JEDEC_ATMEL))
2942
2943                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2944                         tp->nvram_pagesize) +
2945                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2946
2947         return addr;
2948 }
2949
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Returns 0 on success with *val set, or a negative errno from
 * address validation, lock acquisition, or the NVRAM command.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without the NVRAM engine use the legacy EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate with firmware before touching the NVRAM engine. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Read data only if the command completed. */
	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2987
2988 /* Ensures NVRAM data is in bytestream format. */
2989 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2990 {
2991         u32 v;
2992         int res = tg3_nvram_read(tp, offset, &v);
2993         if (!res)
2994                 *val = cpu_to_be32(v);
2995         return res;
2996 }
2997
2998 #define RX_CPU_SCRATCH_BASE     0x30000
2999 #define RX_CPU_SCRATCH_SIZE     0x04000
3000 #define TX_CPU_SCRATCH_BASE     0x34000
3001 #define TX_CPU_SCRATCH_SIZE     0x04000
3002
/* tp->lock is held. */
/* Halt the on-chip RX or TX CPU selected by offset.  On 5906 the
 * VCPU is halted through GRC instead.  Returns 0 on success, -ENODEV
 * if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts have no TX CPU to halt. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request halt until the mode register
		 * confirms it.
		 */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU gets one final forced halt plus settle time. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3047
/* Describes a firmware image to be copied into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;		/* CPU start address (PC); low 16
					 * bits also locate the image in
					 * scratch memory
					 */
	unsigned int fw_len;		/* image length in bytes */
	const __be32 *fw_data;		/* big-endian image words */
};
3053
/* tp->lock is held. */
/* Halt the CPU at cpu_base, zero its scratch memory and copy
 * info->fw_data into the scratch area at (fw_base & 0xffff).  The CPU
 * is left halted; the caller starts it.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the write primitive for this chip generation. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear all of scratch memory, keep the CPU halted, then copy
	 * the image word-by-word into place.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3099
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU at the image's entry point.  Returns 0
 * on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Verify the PC took; re-halt and retry up to 5 times. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}

	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3154
/* tp->lock is held. */
/* Load the TSO firmware into an on-chip CPU and start it.  A no-op
 * (returns 0) when the part has hardware TSO (any HW_TSO_* flag).
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	/* 5705 runs the firmware on the RX CPU out of the mbuf pool
	 * area; other parts use the TX CPU and its scratch memory.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Verify the PC took; re-halt and retry up to 5 times. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}

	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3218
3219
3220 /* tp->lock is held. */
3221 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3222 {
3223         u32 addr_high, addr_low;
3224         int i;
3225
3226         addr_high = ((tp->dev->dev_addr[0] << 8) |
3227                      tp->dev->dev_addr[1]);
3228         addr_low = ((tp->dev->dev_addr[2] << 24) |
3229                     (tp->dev->dev_addr[3] << 16) |
3230                     (tp->dev->dev_addr[4] <<  8) |
3231                     (tp->dev->dev_addr[5] <<  0));
3232         for (i = 0; i < 4; i++) {
3233                 if (i == 1 && skip_mac_1)
3234                         continue;
3235                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3236                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3237         }
3238
3239         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3240             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3241                 for (i = 0; i < 12; i++) {
3242                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3243                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3244                 }
3245         }
3246
3247         addr_high = (tp->dev->dev_addr[0] +
3248                      tp->dev->dev_addr[1] +
3249                      tp->dev->dev_addr[2] +
3250                      tp->dev->dev_addr[3] +
3251                      tp->dev->dev_addr[4] +
3252                      tp->dev->dev_addr[5]) &
3253                 TX_BACKOFF_SEED_MASK;
3254         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3255 }
3256
/* Rewrite the saved MISC_HOST_CTRL value into PCI config space to
 * make sure register accesses (indirect or otherwise) will function
 * correctly.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3266
3267 static int tg3_power_up(struct tg3 *tp)
3268 {
3269         int err;
3270
3271         tg3_enable_register_access(tp);
3272
3273         err = pci_set_power_state(tp->pdev, PCI_D0);
3274         if (!err) {
3275                 /* Switch out of Vaux if it is a NIC */
3276                 tg3_pwrsrc_switch_to_vmain(tp);
3277         } else {
3278                 netdev_err(tp->dev, "Transition to D0 failed\n");
3279         }
3280
3281         return err;
3282 }
3283
/*
 * tg3_power_down_prepare - quiesce the chip ahead of a low-power
 * transition.
 *
 * Re-enables CLKREQ where a workaround had disabled it, masks PCI
 * interrupts, saves the current link settings and drops the PHY to a
 * reduced advertisement, arms the Wake-on-LAN mailbox and MAC magic
 * packet logic when the device may wake the system, gates whichever
 * MAC/core clocks this chip generation allows, powers down the PHY when
 * nothing needs it, and finally signals RESET_KIND_SHUTDOWN to the
 * firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the active settings so resume can restore
			 * them.
			 */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only the slowest mode by default; add
			 * faster modes below only if WoL/ASF needs them.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families take the
			 * extra low-power handling further below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper links drop to 10/half for the duration. */
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200ms) for the firmware status mailbox to
		 * report the expected magic value.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			/* Put the PHY into its WoL power mode first. */
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks where this chip generation allows it. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		/* Clock control is applied in two steps (newbits1 then
		 * newbits2), with a third step on pre-5705 parts.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* The PHY can be fully powered down only if neither WoL nor ASF
	 * needs the link.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU while holding the NVRAM lock;
			 * only unlock if the lock was acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3529
/*
 * tg3_power_down - final power-off path: run the shutdown sequence, arm
 * PCI wake when WoL is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3537
3538 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3539 {
3540         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3541         case MII_TG3_AUX_STAT_10HALF:
3542                 *speed = SPEED_10;
3543                 *duplex = DUPLEX_HALF;
3544                 break;
3545
3546         case MII_TG3_AUX_STAT_10FULL:
3547                 *speed = SPEED_10;
3548                 *duplex = DUPLEX_FULL;
3549                 break;
3550
3551         case MII_TG3_AUX_STAT_100HALF:
3552                 *speed = SPEED_100;
3553                 *duplex = DUPLEX_HALF;
3554                 break;
3555
3556         case MII_TG3_AUX_STAT_100FULL:
3557                 *speed = SPEED_100;
3558                 *duplex = DUPLEX_FULL;
3559                 break;
3560
3561         case MII_TG3_AUX_STAT_1000HALF:
3562                 *speed = SPEED_1000;
3563                 *duplex = DUPLEX_HALF;
3564                 break;
3565
3566         case MII_TG3_AUX_STAT_1000FULL:
3567                 *speed = SPEED_1000;
3568                 *duplex = DUPLEX_FULL;
3569                 break;
3570
3571         default:
3572                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3573                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3574                                  SPEED_10;
3575                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3576                                   DUPLEX_HALF;
3577                         break;
3578                 }
3579                 *speed = SPEED_INVALID;
3580                 *duplex = DUPLEX_INVALID;
3581                 break;
3582         }
3583 }
3584
/*
 * tg3_phy_autoneg_cfg - program the PHY's autonegotiation advertisement.
 * @advertise: ADVERTISED_* link-mode bits to advertise
 * @flowctrl: FLOW_CTRL_TX/RX pause settings folded into the advertisement
 *
 * Writes MII_ADVERTISE (10/100 + pause), then MII_CTRL1000 (gigabit)
 * unless the PHY is 10/100-only, and finally the EEE advertisement plus
 * chip-specific DSP fixups for EEE-capable PHYs.
 *
 * Returns 0 on success or a negative error from the first failing write.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

	/* 5701 A0/B0 workaround: force master mode for 1000baseT. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reprogramming EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Chip-specific DSP fixups; note the deliberate
		 * fallthrough from the 5717/57765/5719 cases into 5720.
		 */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Preserve the first error; the SMDSP-disable result is
		 * reported only when everything above succeeded.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3657
/*
 * tg3_phy_copper_begin - (re)start link negotiation on a copper PHY.
 *
 * Chooses an advertisement based on the current state: a reduced 10
 * (optionally 100) Mbps set while in low-power/WoL mode, the configured
 * advertising mask when no fixed speed is requested, or a single mode
 * when a specific speed/duplex was forced.  Then either forces BMCR
 * for AUTONEG_DISABLE (dropping the old link first) or restarts
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power mode: advertise only what WoL may need. */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No fixed speed requested: strip gigabit modes on
		 * 10/100-only PHYs, then advertise the configured set.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and poll (up
			 * to 1500 x 10us) for BMSR to report link loss
			 * before applying the new mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3751
3752 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3753 {
3754         int err;
3755
3756         /* Turn off tap power management. */
3757         /* Set Extended packet length bit */
3758         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3759
3760         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3761         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3762         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3763         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3764         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3765
3766         udelay(40);
3767
3768         return err;
3769 }
3770
/*
 * tg3_phy_copper_an_config_ok - verify the PHY's advertisement
 * registers match the requested link configuration.
 * @lcladv: filled with the raw MII_ADVERTISE value on a successful read.
 *
 * Returns true when MII_ADVERTISE (including pause bits on full-duplex
 * links) and, for gigabit-capable PHYs, MII_CTRL1000 agree with
 * tp->link_config; false on a register read failure or any mismatch.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits are only compared on full-duplex links. */
		tgtadv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
3805
3806 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3807 {
3808         u32 lpeth = 0;
3809
3810         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3811                 u32 val;
3812
3813                 if (tg3_readphy(tp, MII_STAT1000, &val))
3814                         return false;
3815
3816                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3817         }
3818
3819         if (tg3_readphy(tp, MII_LPA, rmtadv))
3820                 return false;
3821
3822         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3823         tp->link_config.rmt_adv = lpeth;
3824
3825         return true;
3826 }
3827
3828 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3829 {
3830         int current_link_up;
3831         u32 bmsr, val;
3832         u32 lcl_adv, rmt_adv;
3833         u16 current_speed;
3834         u8 current_duplex;
3835         int i, err;
3836
3837         tw32(MAC_EVENT, 0);
3838
3839         tw32_f(MAC_STATUS,
3840              (MAC_STATUS_SYNC_CHANGED |
3841               MAC_STATUS_CFG_CHANGED |
3842               MAC_STATUS_MI_COMPLETION |
3843               MAC_STATUS_LNKSTATE_CHANGED));
3844         udelay(40);
3845
3846         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3847                 tw32_f(MAC_MI_MODE,
3848                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3849                 udelay(80);
3850         }
3851
3852         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3853
3854         /* Some third-party PHYs need to be reset on link going
3855          * down.
3856          */
3857         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3858              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3859              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3860             netif_carrier_ok(tp->dev)) {
3861                 tg3_readphy(tp, MII_BMSR, &bmsr);
3862                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3863                     !(bmsr & BMSR_LSTATUS))
3864                         force_reset = 1;
3865         }
3866         if (force_reset)
3867                 tg3_phy_reset(tp);
3868
3869         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3870                 tg3_readphy(tp, MII_BMSR, &bmsr);
3871                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3872                     !tg3_flag(tp, INIT_COMPLETE))
3873                         bmsr = 0;
3874
3875                 if (!(bmsr & BMSR_LSTATUS)) {
3876                         err = tg3_init_5401phy_dsp(tp);
3877                         if (err)
3878                                 return err;
3879
3880                         tg3_readphy(tp, MII_BMSR, &bmsr);
3881                         for (i = 0; i < 1000; i++) {
3882                                 udelay(10);
3883                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3884                                     (bmsr & BMSR_LSTATUS)) {
3885                                         udelay(40);
3886                                         break;
3887                                 }
3888                         }
3889
3890                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3891                             TG3_PHY_REV_BCM5401_B0 &&
3892                             !(bmsr & BMSR_LSTATUS) &&
3893                             tp->link_config.active_speed == SPEED_1000) {
3894                                 err = tg3_phy_reset(tp);
3895                                 if (!err)
3896                                         err = tg3_init_5401phy_dsp(tp);
3897                                 if (err)
3898                                         return err;
3899                         }
3900                 }
3901         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3902                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3903                 /* 5701 {A0,B0} CRC bug workaround */
3904                 tg3_writephy(tp, 0x15, 0x0a75);
3905                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3906                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3907                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3908         }
3909
3910         /* Clear pending interrupts... */
3911         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3912         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3913
3914         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3915                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3916         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3917                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3918
3919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3920             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3921                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3922                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3923                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3924                 else
3925                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3926         }
3927
3928         current_link_up = 0;
3929         current_speed = SPEED_INVALID;
3930         current_duplex = DUPLEX_INVALID;
3931         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3932         tp->link_config.rmt_adv = 0;
3933
3934         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3935                 err = tg3_phy_auxctl_read(tp,
3936                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3937                                           &val);
3938                 if (!err && !(val & (1 << 10))) {
3939                         tg3_phy_auxctl_write(tp,
3940                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3941                                              val | (1 << 10));
3942                         goto relink;
3943                 }
3944         }
3945
3946         bmsr = 0;
3947         for (i = 0; i < 100; i++) {
3948                 tg3_readphy(tp, MII_BMSR, &bmsr);
3949                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3950                     (bmsr & BMSR_LSTATUS))
3951                         break;
3952                 udelay(40);
3953         }
3954
3955         if (bmsr & BMSR_LSTATUS) {
3956                 u32 aux_stat, bmcr;
3957
3958                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3959                 for (i = 0; i < 2000; i++) {
3960                         udelay(10);
3961                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3962                             aux_stat)
3963                                 break;
3964                 }
3965
3966                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3967                                              &current_speed,
3968                                              &current_duplex);
3969
3970                 bmcr = 0;
3971                 for (i = 0; i < 200; i++) {
3972                         tg3_readphy(tp, MII_BMCR, &bmcr);
3973                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3974                                 continue;
3975                         if (bmcr && bmcr != 0x7fff)
3976                                 break;
3977                         udelay(10);
3978                 }
3979
3980                 lcl_adv = 0;
3981                 rmt_adv = 0;
3982
3983                 tp->link_config.active_speed = current_speed;
3984                 tp->link_config.active_duplex = current_duplex;
3985
3986                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3987                         if ((bmcr & BMCR_ANENABLE) &&
3988                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3989                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3990                                 current_link_up = 1;
3991                 } else {
3992                         if (!(bmcr & BMCR_ANENABLE) &&
3993                             tp->link_config.speed == current_speed &&
3994                             tp->link_config.duplex == current_duplex &&
3995                             tp->link_config.flowctrl ==
3996                             tp->link_config.active_flowctrl) {
3997                                 current_link_up = 1;
3998                         }
3999                 }
4000
4001                 if (current_link_up == 1 &&
4002                     tp->link_config.active_duplex == DUPLEX_FULL) {
4003                         u32 reg, bit;
4004
4005                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4006                                 reg = MII_TG3_FET_GEN_STAT;
4007                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4008                         } else {
4009                                 reg = MII_TG3_EXT_STAT;
4010                                 bit = MII_TG3_EXT_STAT_MDIX;
4011                         }
4012
4013                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4014                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4015
4016                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4017                 }
4018         }
4019
4020 relink:
4021         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4022                 tg3_phy_copper_begin(tp);
4023
4024                 tg3_readphy(tp, MII_BMSR, &bmsr);
4025                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4026                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4027                         current_link_up = 1;
4028         }
4029
4030         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4031         if (current_link_up == 1) {
4032                 if (tp->link_config.active_speed == SPEED_100 ||
4033                     tp->link_config.active_speed == SPEED_10)
4034                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4035                 else
4036                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4037         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4038                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4039         else
4040                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4041
4042         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4043         if (tp->link_config.active_duplex == DUPLEX_HALF)
4044                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4045
4046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4047                 if (current_link_up == 1 &&
4048                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4049                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4050                 else
4051                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4052         }
4053
4054         /* ??? Without this setting Netgear GA302T PHY does not
4055          * ??? send/receive packets...
4056          */
4057         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4058             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4059                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4060                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4061                 udelay(80);
4062         }
4063
4064         tw32_f(MAC_MODE, tp->mac_mode);
4065         udelay(40);
4066
4067         tg3_phy_eee_adjust(tp, current_link_up);
4068
4069         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4070                 /* Polled via timer. */
4071                 tw32_f(MAC_EVENT, 0);
4072         } else {
4073                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4074         }
4075         udelay(40);
4076
4077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4078             current_link_up == 1 &&
4079             tp->link_config.active_speed == SPEED_1000 &&
4080             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4081                 udelay(120);
4082                 tw32_f(MAC_STATUS,
4083                      (MAC_STATUS_SYNC_CHANGED |
4084                       MAC_STATUS_CFG_CHANGED));
4085                 udelay(40);
4086                 tg3_write_mem(tp,
4087                               NIC_SRAM_FIRMWARE_MBOX,
4088                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4089         }
4090
4091         /* Prevent send BD corruption. */
4092         if (tg3_flag(tp, CLKREQ_BUG)) {
4093                 u16 oldlnkctl, newlnkctl;
4094
4095                 pci_read_config_word(tp->pdev,
4096                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4097                                      &oldlnkctl);
4098                 if (tp->link_config.active_speed == SPEED_100 ||
4099                     tp->link_config.active_speed == SPEED_10)
4100                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4101                 else
4102                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4103                 if (newlnkctl != oldlnkctl)
4104                         pci_write_config_word(tp->pdev,
4105                                               pci_pcie_cap(tp->pdev) +
4106                                               PCI_EXP_LNKCTL, newlnkctl);
4107         }
4108
4109         if (current_link_up != netif_carrier_ok(tp->dev)) {
4110                 if (current_link_up)
4111                         netif_carrier_on(tp->dev);
4112                 else
4113                         netif_carrier_off(tp->dev);
4114                 tg3_link_report(tp);
4115         }
4116
4117         return 0;
4118 }
4119
/* Software state for the fiber (1000BASE-X style) autonegotiation
 * state machine, driven one tick at a time by
 * tg3_fiber_aneg_smachine() when the hardware autoneg engine is not
 * used.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* control/status bits; the MR_LP_ADV_* bits record what
	 * the link partner advertised in its received config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	unsigned long link_time, cur_time;	/* measured in machine ticks */

	u32 ability_match_cfg;		/* last config word received */
	int ability_match_count;	/* consecutive ticks it was repeated */

	char ability_match, idle_match, ack_match;

	/* Config codeword bits as laid out in the MAC_TX/RX_AUTO_NEG
	 * registers; ANEG_CFG_INVAL covers the reserved bits that must
	 * be zero in a valid codeword.
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

#define ANEG_STATE_SETTLE_TIME  10000	/* ticks to let a state settle */
4183
/* Advance the software fiber autonegotiation state machine by one
 * tick.  First samples the received config word from the MAC and
 * updates the ability/ack/idle match trackers, then executes one step
 * of the state switch, which may program MAC_TX_AUTO_NEG and
 * MAC_MODE.
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB when the caller
 * should continue ticking with a settle timer armed, ANEG_DONE on
 * completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* Fresh start: clear all tracking state. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set once the same config word has
		 * been seen on more than one consecutive tick.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving from the partner. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Restart: send all-zero config words. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time expires, then
		 * begin advertising our abilities.
		 */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Build our advertisement: full duplex plus pause bits
		 * derived from the configured flow control.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Partner's abilities seen; acknowledge them. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Accept only if the acked word matches the
			 * stable ability word (ignoring the ACK bit).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Record everything the link partner advertised. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008: presumably the rx toggle bit (mirrored into
		 * MR_TOGGLE_RX) -- no named constant exists for it.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		/* Partner fell back to zero config words: restart. */
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * supported here.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4435
4436 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4437 {
4438         int res = 0;
4439         struct tg3_fiber_aneginfo aninfo;
4440         int status = ANEG_FAILED;
4441         unsigned int tick;
4442         u32 tmp;
4443
4444         tw32_f(MAC_TX_AUTO_NEG, 0);
4445
4446         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4447         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4448         udelay(40);
4449
4450         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4451         udelay(40);
4452
4453         memset(&aninfo, 0, sizeof(aninfo));
4454         aninfo.flags |= MR_AN_ENABLE;
4455         aninfo.state = ANEG_STATE_UNKNOWN;
4456         aninfo.cur_time = 0;
4457         tick = 0;
4458         while (++tick < 195000) {
4459                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4460                 if (status == ANEG_DONE || status == ANEG_FAILED)
4461                         break;
4462
4463                 udelay(1);
4464         }
4465
4466         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4467         tw32_f(MAC_MODE, tp->mac_mode);
4468         udelay(40);
4469
4470         *txflags = aninfo.txconfig;
4471         *rxflags = aninfo.flags;
4472
4473         if (status == ANEG_DONE &&
4474             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4475                              MR_LP_ADV_FULL_DUPLEX)))
4476                 res = 1;
4477
4478         return res;
4479 }
4480
/* One-time bring-up of the BCM8002 SerDes PHY: set PLL lock range,
 * soft-reset, program auto-lock/comdet, pulse POR, then deselect the
 * channel register.  The raw register numbers and values are vendor
 * magic; the write order and the delays must be preserved as-is.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4530
/* Drive one pass of fiber link setup using the SG_DIG hardware
 * autonegotiation engine.  Handles forced (non-autoneg) mode,
 * (re)starting hardware autoneg, autoneg completion -- including
 * recording the link partner's pause advertisement in
 * tp->link_config.rmt_adv -- and the parallel-detection fallback when
 * autoneg times out.
 *
 * Returns nonzero if the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down hardware autoneg if active,
		 * then report link up on PCS sync alone.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If the link came up via parallel detection, keep it
		 * up for a grace period (serdes_counter) before
		 * forcing an autoneg restart.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Soft-reset the SG_DIG block with the desired config. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: resolve flow control from
			 * local and partner pause bits, and remember
			 * the partner's advertisement.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable the
				 * engine and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync or signal: re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4675
4676 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4677 {
4678         int current_link_up = 0;
4679
4680         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4681                 goto out;
4682
4683         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4684                 u32 txflags, rxflags;
4685                 int i;
4686
4687                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4688                         u32 local_adv = 0, remote_adv = 0;
4689
4690                         if (txflags & ANEG_CFG_PS1)
4691                                 local_adv |= ADVERTISE_1000XPAUSE;
4692                         if (txflags & ANEG_CFG_PS2)
4693                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4694
4695                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4696                                 remote_adv |= LPA_1000XPAUSE;
4697                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4698                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4699
4700                         tp->link_config.rmt_adv =
4701                                            mii_adv_to_ethtool_adv_x(remote_adv);
4702
4703                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4704
4705                         current_link_up = 1;
4706                 }
4707                 for (i = 0; i < 30; i++) {
4708                         udelay(20);
4709                         tw32_f(MAC_STATUS,
4710                                (MAC_STATUS_SYNC_CHANGED |
4711                                 MAC_STATUS_CFG_CHANGED));
4712                         udelay(40);
4713                         if ((tr32(MAC_STATUS) &
4714                              (MAC_STATUS_SYNC_CHANGED |
4715                               MAC_STATUS_CFG_CHANGED)) == 0)
4716                                 break;
4717                 }
4718
4719                 mac_status = tr32(MAC_STATUS);
4720                 if (current_link_up == 0 &&
4721                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4722                     !(mac_status & MAC_STATUS_RCVD_CFG))
4723                         current_link_up = 1;
4724         } else {
4725                 tg3_setup_flow_control(tp, 0, 0);
4726
4727                 /* Forcing 1000FD link up. */
4728                 current_link_up = 1;
4729
4730                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4731                 udelay(40);
4732
4733                 tw32_f(MAC_MODE, tp->mac_mode);
4734                 udelay(40);
4735         }
4736
4737 out:
4738         return current_link_up;
4739 }
4740
4741 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4742 {
4743         u32 orig_pause_cfg;
4744         u16 orig_active_speed;
4745         u8 orig_active_duplex;
4746         u32 mac_status;
4747         int current_link_up;
4748         int i;
4749
4750         orig_pause_cfg = tp->link_config.active_flowctrl;
4751         orig_active_speed = tp->link_config.active_speed;
4752         orig_active_duplex = tp->link_config.active_duplex;
4753
4754         if (!tg3_flag(tp, HW_AUTONEG) &&
4755             netif_carrier_ok(tp->dev) &&
4756             tg3_flag(tp, INIT_COMPLETE)) {
4757                 mac_status = tr32(MAC_STATUS);
4758                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4759                                MAC_STATUS_SIGNAL_DET |
4760                                MAC_STATUS_CFG_CHANGED |
4761                                MAC_STATUS_RCVD_CFG);
4762                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4763                                    MAC_STATUS_SIGNAL_DET)) {
4764                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4765                                             MAC_STATUS_CFG_CHANGED));
4766                         return 0;
4767                 }
4768         }
4769
4770         tw32_f(MAC_TX_AUTO_NEG, 0);
4771
4772         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4773         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4774         tw32_f(MAC_MODE, tp->mac_mode);
4775         udelay(40);
4776
4777         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4778                 tg3_init_bcm8002(tp);
4779
4780         /* Enable link change event even when serdes polling.  */
4781         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4782         udelay(40);
4783
4784         current_link_up = 0;
4785         tp->link_config.rmt_adv = 0;
4786         mac_status = tr32(MAC_STATUS);
4787
4788         if (tg3_flag(tp, HW_AUTONEG))
4789                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4790         else
4791                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4792
4793         tp->napi[0].hw_status->status =
4794                 (SD_STATUS_UPDATED |
4795                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4796
4797         for (i = 0; i < 100; i++) {
4798                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4799                                     MAC_STATUS_CFG_CHANGED));
4800                 udelay(5);
4801                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4802                                          MAC_STATUS_CFG_CHANGED |
4803                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4804                         break;
4805         }
4806
4807         mac_status = tr32(MAC_STATUS);
4808         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4809                 current_link_up = 0;
4810                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4811                     tp->serdes_counter == 0) {
4812                         tw32_f(MAC_MODE, (tp->mac_mode |
4813                                           MAC_MODE_SEND_CONFIGS));
4814                         udelay(1);
4815                         tw32_f(MAC_MODE, tp->mac_mode);
4816                 }
4817         }
4818
4819         if (current_link_up == 1) {
4820                 tp->link_config.active_speed = SPEED_1000;
4821                 tp->link_config.active_duplex = DUPLEX_FULL;
4822                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4823                                     LED_CTRL_LNKLED_OVERRIDE |
4824                                     LED_CTRL_1000MBPS_ON));
4825         } else {
4826                 tp->link_config.active_speed = SPEED_INVALID;
4827                 tp->link_config.active_duplex = DUPLEX_INVALID;
4828                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4829                                     LED_CTRL_LNKLED_OVERRIDE |
4830                                     LED_CTRL_TRAFFIC_OVERRIDE));
4831         }
4832
4833         if (current_link_up != netif_carrier_ok(tp->dev)) {
4834                 if (current_link_up)
4835                         netif_carrier_on(tp->dev);
4836                 else
4837                         netif_carrier_off(tp->dev);
4838                 tg3_link_report(tp);
4839         } else {
4840                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4841                 if (orig_pause_cfg != now_pause_cfg ||
4842                     orig_active_speed != tp->link_config.active_speed ||
4843                     orig_active_duplex != tp->link_config.active_duplex)
4844                         tg3_link_report(tp);
4845         }
4846
4847         return 0;
4848 }
4849
/* Link setup for MII-managed SerDes PHYs (1000Base-X).
 *
 * Puts the MAC in GMII port mode, clears latched link events,
 * optionally resets the PHY, then either (re)starts 1000Base-X
 * autonegotiation or forces the configured duplex.  Finally resolves
 * speed/duplex/flow-control from the negotiated (or parallel-detected)
 * result and updates carrier state.
 *
 * Returns 0 on success, or the OR-accumulated error from the MII
 * register accesses.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Acknowledge/clear any latched link-state events before
	 * re-evaluating the link.
	 */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low per 802.3; read twice so the
	 * second read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: the MAC's TX status is authoritative for link. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * configured flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): write the
			 * new advertisement, restart autoneg, and let the
			 * serdes timer pick up the result later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: autoneg off, duplex from link_config. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * AN so the peer observes the link drop
				 * before the forced settings take effect.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: double read for fresh state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of the
			 * local and link-partner advertisements, and
			 * record what the partner advertised.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5021
/* Timer-driven parallel-detect handling for serdes links.
 *
 * While serdes_counter is nonzero, autoneg is still being given time
 * and this only counts down.  Once expired:
 *  - link down + AN enabled: if the PHY reports signal detect but no
 *    incoming config code words, force the link up (1000/full) via
 *    parallel detection;
 *  - link up via parallel detect: if config code words start arriving,
 *    switch back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* NOTE(review): read twice -- presumably the first
			 * read clears latched bits; confirm against PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5081
/* Top-level PHY/link setup entry point.
 *
 * Dispatches to the fiber, MII-serdes, or copper handler based on
 * phy_flags, then applies post-link fixups:
 *  - 5784_AX: reprogram the GRC clock prescaler from the current MAC
 *    clock speed;
 *  - MAC TX lengths: IPG values plus a larger slot time for the
 *    1000/half case (5720 additionally preserves jumbo-frame and
 *    count-down fields);
 *  - pre-5705 chips: enable/disable statistics-block coalescing with
 *    carrier state;
 *  - ASPM workaround: relax the PCIe L1 entry threshold while link is
 *    down, tighten it while link is up.
 *
 * Returns the error code from the per-PHY-type handler.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs the larger (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5146
/* Returns tp->irq_sync.  NOTE(review): name suggests a nonzero value
 * means interrupt handling is being synchronized/quiesced and handlers
 * should back off -- confirm against the call sites.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5151
/* Copy 'len' bytes of registers, starting at register offset 'off',
 * into the dump buffer 'dst'.  The destination pointer is first
 * advanced by 'off' bytes so each register value lands at its own
 * register offset within the buffer (dst is the buffer base).
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5160
/* Snapshot the legacy (non-PCI-Express) register blocks into 'regs'
 * for tg3_dump_state().  Each call copies the named block's registers
 * into the buffer at the block's own offset (see tg3_rd32_loop); the
 * second argument to each call is the block base, the third its byte
 * length.  Some blocks are chip-feature dependent (MSI-X per-vector
 * coalescing, pre-5705 TX CPU, NVRAM).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5210
/* Dump chip registers and per-vector status/NAPI state to the kernel
 * log for debugging.  Non-zero register quads are printed 4 words per
 * line; all-zero quads are skipped to keep the log compact.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	/* GFP_ATOMIC: may be called from a context that cannot sleep
	 * (NOTE(review): e.g. error/timeout paths -- confirm callers).
	 */
	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		/* Skip fully-zero groups of four registers. */
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5268
5269 /* This is called whenever we suspect that the system chipset is re-
5270  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5271  * is bogus tx completions. We try to recover by setting the
5272  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5273  * in the workqueue.
5274  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set, or tx
	 * mailbox writes already routed through the indirect method),
	 * bogus completions should be impossible -- treat as fatal.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the condition; the actual chip reset happens later in
	 * the workqueue (see the comment above this function).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5290
5291 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5292 {
5293         /* Tell compiler to fetch tx indices from memory. */
5294         barrier();
5295         return tnapi->tx_pending -
5296                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5297 }
5298
5299 /* Tigon3 never reports partial packet sends.  So we do not
5300  * need special logic to handle SKBs that have not had all
5301  * of their frags sent yet, like SunGEM does.
5302  */
/* Reclaim completed tx descriptors for one NAPI instance.
 *
 * Walks the tx ring from the driver's consumer index up to the
 * hardware's reported consumer index, unmapping DMA and freeing each
 * completed skb, then wakes the associated netdev tx queue if it was
 * stopped and enough ring space has been freed.  On ring corruption
 * (empty slot or frag overrun) it calls tg3_tx_recover() and returns.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the tx vectors start at napi[1]; shift down by one
	 * to get the netdev tx queue index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed head slot with no skb means the hardware
		 * and driver views of the ring have diverged.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* The head descriptor maps the skb's linear data. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra descriptors flagged as 'fragmented'
		 * (buffer split across multiple BDs by the xmit path).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag slots must be skb-less and must not run
			 * past the hardware index; otherwise the ring
			 * is inconsistent.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions for byte queue limit accounting. */
	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock so we do not wake a queue
		 * the xmit path is concurrently stopping.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5392
5393 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5394 {
5395         if (!ri->data)
5396                 return;
5397
5398         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5399                          map_sz, PCI_DMA_FROMDEVICE);
5400         kfree(ri->data);
5401         ri->data = NULL;
5402 }
5403
5404 /* Returns size of skb allocated or < 0 on error.
5405  *
5406  * We only need to fill in the address because the other members
5407  * of the RX descriptor are invariant, see tg3_init_rings.
5408  *
5409  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5410  * posting buffers we only dirty the first cache line of the RX
5411  * descriptor (containing the address).  Whereas for the RX status
5412  * buffers the cpu only reads the last cacheline of the RX descriptor
5413  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5414  */
/* Allocate and DMA-map a fresh rx data buffer for the given producer
 * ring slot (standard or jumbo), then write its bus address into the
 * hardware descriptor.
 *
 * Returns the mapped data size on success, -EINVAL for an unknown
 * opaque_key, -ENOMEM on allocation failure, or -EIO if the DMA
 * mapping fails.  On any failure the slot is left untouched (see the
 * comment below -- callers rely on that).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick the descriptor, bookkeeping slot, and buffer size for
	 * the target ring.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Size the allocation so that build_skb-style use is possible:
	 * aligned payload plus room for struct skb_shared_info.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	/* Map only the payload region, past the rx offset padding. */
	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	/* Commit: record the buffer and publish the bus address to
	 * the hardware descriptor.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5472
5473 /* We only need to move over in the address because the other
5474  * members of the RX descriptor are invariant.  See notes above
5475  * tg3_alloc_rx_data for full details.
5476  */
/* Recycle an rx buffer: move its data pointer, DMA mapping, and bus
 * address from a source producer-ring slot (always napi[0]'s ring) to
 * a destination slot in 'dpr', without reallocating.  Unknown
 * opaque_key values are silently ignored.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer buffer ownership and the hardware-visible address. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5522
5523 /* The RX ring scheme is composed of multiple rings which post fresh
5524  * buffers to the chip, and one special ring the chip uses to report
5525  * status back to the host.
5526  *
5527  * The special ring reports the status of received packets to the
5528  * host.  The chip does not write into the original descriptor the
5529  * RX buffer was obtained from.  The chip simply takes the original
5530  * descriptor as provided by the host, updates the status and length
5531  * field, then writes this into the next status ring entry.
5532  *
5533  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5535  * it is first placed into the on-chip ram.  When the packet's length
5536  * is known, it walks down the TG3_BDINFO entries to select the ring.
5537  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5538  * which is within the range of the new packet's length is chosen.
5539  *
5540  * The "separate ring for rx status" scheme may sound queer, but it makes
5541  * sense from a cache coherency perspective.  If only the host writes
5542  * to the buffer post rings, and only the chip writes to the rx status
5543  * rings, then cache lines never move beyond shared-modified state.
5544  * If both the host and chip were to write into the same ring, cache line
5545  * eviction could occur since both entities want it in an exclusive state.
5546  */
/* Service up to @budget completed packets on this NAPI context's RX
 * return ring and pass them up the stack.  Returns the number of
 * packets delivered.  Also replenishes the producer ring(s) the
 * buffers came from -- directly in non-RSS mode, or by kicking
 * napi[1] (which owns the hardware-visible rings) under RSS.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies which producer ring (and
		 * which slot within it) this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Hardware-flagged error: recycle the buffer back to its
		 * producer ring and count the drop.  A lone odd-nibble
		 * MII error is tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* The chip's reported length includes the FCS; trim it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: hand the existing DMA buffer to the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a fresh skb and recycle
			 * the original DMA buffer in place.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when the full TCP/UDP
		 * checksum came back as 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop over-MTU frames unless they carry a VLAN header. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Don't let too many consumed std-ring buffers accumulate
		 * unposted; tell the hardware about them in batches.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Under RSS, napi[1] posts buffers back to the hardware
		 * (see tg3_poll_work); kick it if we aren't it.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5733
5734 static void tg3_poll_link(struct tg3 *tp)
5735 {
5736         /* handle link change and other phy events */
5737         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5738                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5739
5740                 if (sblk->status & SD_STATUS_LINK_CHG) {
5741                         sblk->status = SD_STATUS_UPDATED |
5742                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5743                         spin_lock(&tp->lock);
5744                         if (tg3_flag(tp, USE_PHYLIB)) {
5745                                 tw32_f(MAC_STATUS,
5746                                      (MAC_STATUS_SYNC_CHANGED |
5747                                       MAC_STATUS_CFG_CHANGED |
5748                                       MAC_STATUS_MI_COMPLETION |
5749                                       MAC_STATUS_LNKSTATE_CHANGED));
5750                                 udelay(40);
5751                         } else
5752                                 tg3_setup_phy(tp, 0);
5753                         spin_unlock(&tp->lock);
5754                 }
5755         }
5756 }
5757
/* Move recycled RX buffers from a per-vector source producer ring set
 * (@spr) into the destination set (@dpr, napi[0]'s rings under RSS).
 * Transfers as many contiguous entries as possible per pass, for the
 * standard ring first and then the jumbo ring.  Returns 0, or -ENOSPC
 * if a destination slot was still occupied so some entries remain
 * (the caller retries later).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard-sized buffer ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous source entries, stopping at the
		 * ring wrap point.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also stop at the destination ring's wrap point. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a
		 * buffer; copy only the free prefix and flag -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the DMA addresses of the transferred descriptors. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo buffer ring: same algorithm as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5883
/* One pass of per-vector NAPI work: reap TX completions, service RX,
 * and -- on the RSS master vector, napi[1] -- gather recycled buffers
 * from every vector's producer rings back into napi[0]'s rings and
 * post them to the hardware.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A fatal TX error was detected; bail out and let the
		 * caller trigger the reset path.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Collect buffers recycled by the other vectors into the
		 * single hardware-visible producer ring set.
		 */
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Ring updates must be visible before the mailbox writes. */
		wmb();

		/* Only write the mailboxes whose index actually moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer hit a full destination slot (-ENOSPC);
		 * poke the coalescing engine so this path runs again
		 * soon to retry.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5930
5931 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5932 {
5933         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5934                 schedule_work(&tp->reset_task);
5935 }
5936
/* Cancel any queued reset task, waiting out one already running,
 * then clear the pending flag so a new reset can be scheduled.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
5942
/* NAPI poll handler for the extra MSI-X vectors.  Unlike tg3_poll(),
 * it does no link or error-attention handling -- vector 0 owns that.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is written to the interrupt mailbox below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* Fatal TX error: stop polling; the reset task will recover. */
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
5986
5987 static void tg3_process_error(struct tg3 *tp)
5988 {
5989         u32 val;
5990         bool real_error = false;
5991
5992         if (tg3_flag(tp, ERROR_PROCESSED))
5993                 return;
5994
5995         /* Check Flow Attention register */
5996         val = tr32(HOSTCC_FLOW_ATTN);
5997         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5998                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5999                 real_error = true;
6000         }
6001
6002         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6003                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6004                 real_error = true;
6005         }
6006
6007         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6008                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6009                 real_error = true;
6010         }
6011
6012         if (!real_error)
6013                 return;
6014
6015         tg3_dump_state(tp);
6016
6017         tg3_flag_set(tp, ERROR_PROCESSED);
6018         tg3_reset_task_schedule(tp);
6019 }
6020
/* Main NAPI poll handler (INTx/MSI, and MSI-X vector 0): services
 * error attention and link changes in addition to TX/RX work.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* Fatal TX error: stop polling; the reset task will recover. */
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6068
6069 static void tg3_napi_disable(struct tg3 *tp)
6070 {
6071         int i;
6072
6073         for (i = tp->irq_cnt - 1; i >= 0; i--)
6074                 napi_disable(&tp->napi[i].napi);
6075 }
6076
6077 static void tg3_napi_enable(struct tg3 *tp)
6078 {
6079         int i;
6080
6081         for (i = 0; i < tp->irq_cnt; i++)
6082                 napi_enable(&tp->napi[i].napi);
6083 }
6084
6085 static void tg3_napi_init(struct tg3 *tp)
6086 {
6087         int i;
6088
6089         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6090         for (i = 1; i < tp->irq_cnt; i++)
6091                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6092 }
6093
6094 static void tg3_napi_fini(struct tg3 *tp)
6095 {
6096         int i;
6097
6098         for (i = 0; i < tp->irq_cnt; i++)
6099                 netif_napi_del(&tp->napi[i].napi);
6100 }
6101
/* Halt all packet processing: park NAPI and disable the TX queues.
 * trans_start is refreshed first so the netdev watchdog doesn't fire
 * while the queues are deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6108
/* Resume packet processing: wake the TX queues, re-enable NAPI, and
 * re-enable chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Force SD_STATUS_UPDATED on -- presumably so pending state is
	 * noticed on the next poll pass (see tg3_has_work usage);
	 * confirm against the has-work logic.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6121
/* Quiesce interrupt activity: set irq_sync so handlers bail early
 * (the ISRs test it via tg3_irq_sync()), then wait for any handler
 * already running on another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Catch unbalanced quiesce calls. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6134
6135 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6136  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6137  * with as well.  Most of the time, this is not necessary except when
6138  * shutting down the device.
6139  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Take tp->lock with bottom halves disabled; a nonzero
	 * irq_sync additionally waits out in-flight IRQ handlers.
	 */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6146
static inline void tg3_full_unlock(struct tg3 *tp)
{
	/* Counterpart to tg3_full_lock(); re-enables bottom halves. */
	spin_unlock_bh(&tp->lock);
}
6151
6152 /* One-shot MSI handler - Chip automatically disables interrupt
6153  * after sending MSI so driver doesn't have to do it.
6154  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Don't schedule NAPI while tg3_irq_quiesce() has interrupt
	 * processing parked.
	 */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6169
6170 /* MSI ISR - No need to check for interrupt sharing and no need to
6171  * flush status block and interrupt mailbox. PCI ordering rules
6172  * guarantee that MSI will arrive after the status block.
6173  */
6174 static irqreturn_t tg3_msi(int irq, void *dev_id)
6175 {
6176         struct tg3_napi *tnapi = dev_id;
6177         struct tg3 *tp = tnapi->tp;
6178
6179         prefetch(tnapi->hw_status);
6180         if (tnapi->rx_rcb)
6181                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6182         /*
6183          * Writing any value to intr-mbox-0 clears PCI INTA# and
6184          * chip-internal interrupt pending events.
6185          * Writing non-zero to intr-mbox-0 additional tells the
6186          * NIC to stop sending us irqs, engaging "in-intr-handler"
6187          * event coalescing.
6188          */
6189         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6190         if (likely(!tg3_irq_sync(tp)))
6191                 napi_schedule(&tnapi->napi);
6192
6193         return IRQ_RETVAL(1);
6194 }
6195
/* INTx interrupt handler (non-tagged status mode).  Returns IRQ_NONE
 * (handled == 0) when the interrupt belongs to another device sharing
 * the line.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6244
/* INTx interrupt handler for tagged-status mode: the hardware bumps
 * status_tag per status-block update, so an unchanged tag means the
 * interrupt was not for us.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6296
6297 /* ISR for interrupt test */
6298 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6299 {
6300         struct tg3_napi *tnapi = dev_id;
6301         struct tg3 *tp = tnapi->tp;
6302         struct tg3_hw_status *sblk = tnapi->hw_status;
6303
6304         if ((sblk->status & SD_STATUS_UPDATED) ||
6305             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6306                 tg3_disable_ints(tp);
6307                 return IRQ_RETVAL(1);
6308         }
6309         return IRQ_RETVAL(0);
6310 }
6311
6312 static int tg3_init_hw(struct tg3 *, int);
6313 static int tg3_halt(struct tg3 *, int, int);
6314
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * tp->lock is temporarily dropped for that (see sparse annotations)
 * and re-acquired before returning the error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() is called without tp->lock held; restore
		 * the lock afterwards so the caller's locking state is
		 * unchanged.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
6338
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: manually invoke the interrupt handler for every vector */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
6349
/* Deferred chip reset, scheduled via tg3_reset_task_schedule() (e.g. from
 * tg3_tx_timeout()).  Halts the chip, re-runs hardware init, and restarts
 * the netif/PHY machinery.  Clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* The device may have been brought down since this work was
	 * scheduled; in that case there is nothing to reset.
	 */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* Stop the PHY and netif machinery outside the full lock */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Recovering from a TX hang: switch the TX mailbox to the
		 * non-flushing writer, flush RX mailbox writes, and assume
		 * mailbox write reordering from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if hardware init succeeded */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6393
6394 static void tg3_tx_timeout(struct net_device *dev)
6395 {
6396         struct tg3 *tp = netdev_priv(dev);
6397
6398         if (netif_msg_tx_err(tp)) {
6399                 netdev_err(dev, "transmit timed out, resetting\n");
6400                 tg3_dump_state(tp);
6401         }
6402
6403         tg3_reset_task_schedule(tp);
6404 }
6405
6406 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6407 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6408 {
6409         u32 base = (u32) mapping & 0xffffffff;
6410
6411         return (base > 0xffffdcc0) && (base + len + 8 < base);
6412 }
6413
6414 /* Test for DMA addresses > 40-bit */
6415 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6416                                           int len)
6417 {
6418 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6419         if (tg3_flag(tp, 40BIT_DMA_BUG))
6420                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6421         return 0;
6422 #else
6423         return 0;
6424 #endif
6425 }
6426
6427 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6428                                  dma_addr_t mapping, u32 len, u32 flags,
6429                                  u32 mss, u32 vlan)
6430 {
6431         txbd->addr_hi = ((u64) mapping >> 32);
6432         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6433         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6434         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6435 }
6436
6437 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6438                             dma_addr_t map, u32 len, u32 flags,
6439                             u32 mss, u32 vlan)
6440 {
6441         struct tg3 *tp = tnapi->tp;
6442         bool hwbug = false;
6443
6444         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6445                 hwbug = 1;
6446
6447         if (tg3_4g_overflow_test(map, len))
6448                 hwbug = 1;
6449
6450         if (tg3_40bit_overflow_test(tp, map, len))
6451                 hwbug = 1;
6452
6453         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6454                 u32 prvidx = *entry;
6455                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6456                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6457                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6458                         len -= TG3_TX_BD_DMA_MAX;
6459
6460                         /* Avoid the 8byte DMA problem */
6461                         if (len <= 8) {
6462                                 len += TG3_TX_BD_DMA_MAX / 2;
6463                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6464                         }
6465
6466                         tnapi->tx_buffers[*entry].fragmented = true;
6467
6468                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6469                                       frag_len, tmp_flag, mss, vlan);
6470                         *budget -= 1;
6471                         prvidx = *entry;
6472                         *entry = NEXT_TX(*entry);
6473
6474                         map += frag_len;
6475                 }
6476
6477                 if (len) {
6478                         if (*budget) {
6479                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6480                                               len, flags, mss, vlan);
6481                                 *budget -= 1;
6482                                 *entry = NEXT_TX(*entry);
6483                         } else {
6484                                 hwbug = 1;
6485                                 tnapi->tx_buffers[prvidx].fragmented = false;
6486                         }
6487                 }
6488         } else {
6489                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6490                               len, flags, mss, vlan);
6491                 *entry = NEXT_TX(*entry);
6492         }
6493
6494         return hwbug;
6495 }
6496
/* Unmap the DMA mappings for the skb stored at ring index @entry plus the
 * following @last+1 page fragments (@last == -1 means head only), and clear
 * the stored skb pointer.  Descriptors that tg3_tx_frag_set() marked
 * "fragmented" carry no mapping of their own and are skipped (their flag is
 * cleared as they are passed over).  The skb itself is NOT freed here.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Head of the skb was mapped with pci_map_single() */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip any extra descriptors the head was split across */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip split descriptors for this fragment, if any */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6534
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear skb (on 5701 with extra
 * headroom so the data is 4-byte aligned), maps it, and writes its TX
 * descriptor chain starting at *entry.
 *
 * Returns 0 on success, -1 on allocation/mapping/descriptor failure.  The
 * original skb is always freed; *pskb is set to the replacement skb and
 * must not be used by the caller after a failure return.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: copy with enough extra headroom to 4-byte align */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer, so this is the last BD */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hit a DMA bug (or out of BDs):
				 * undo the mapping and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* The original skb is consumed in all cases */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6589
6590 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6591
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the skb with TSO masked off and pushes each resulting
 * segment through tg3_start_xmit() individually.  Always consumes @skb and
 * returns NETDEV_TX_OK, unless the ring lacks room for the estimated
 * fragments, in which case NETDEV_TX_BUSY is returned with @skb untouched.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		/* Room freed up between the check and the stop; resume */
		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6632
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb head and fragments for DMA, writes the TX descriptors via
 * tg3_tx_frag_set(), and kicks the TX mailbox.  If any mapping would
 * trigger a hardware DMA bug, the packet is linearized and retried through
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK (packet consumed or
 * dropped with tx_dropped++) or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS the TX vectors start at napi[1] */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We must own the header to modify it below */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			/* Seed the IPv4 header with per-segment totals;
			 * the checksum is recomputed per segment.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers > 80 bytes trip a TSO chip bug; segment in SW */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO computes the TCP checksum itself */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * format each HW TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only HW TSO chips want mss repeated on every fragment */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			/* Only the head entry stores the skb pointer */
			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything mapped so far and retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_sent_queue(tp->dev, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Undo the head and the i-1 fragments mapped before the failure */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
6849
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode and
 * flushing it to the MAC_MODE register.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* Pick the port mode matching the PHY's top speed */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	/* Let the MAC settle after the mode change */
	udelay(40);
}
6877
/* Configure the PHY and MAC for loopback testing at @speed.  @extlpbk
 * selects external loopback (via tg3_phy_set_extloopbk()) instead of the
 * PHY's internal BMCR loopback.  Returns 0 on success, -EIO if external
 * loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value for the requested speed, full duplex */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100 Mbps */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role for external gigabit loopback */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401/5411 on 5700 need opposite link polarity settings */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
6970
/* Toggle internal MAC loopback in response to a NETIF_F_LOOPBACK feature
 * change.  No-op if the requested state is already active.
 */
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		/* Already in loopback mode */
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		/* Loopback always has "link", independent of the PHY */
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		/* Already out of loopback mode */
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
6996
6997 static netdev_features_t tg3_fix_features(struct net_device *dev,
6998         netdev_features_t features)
6999 {
7000         struct tg3 *tp = netdev_priv(dev);
7001
7002         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7003                 features &= ~NETIF_F_ALL_TSO;
7004
7005         return features;
7006 }
7007
7008 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7009 {
7010         netdev_features_t changed = dev->features ^ features;
7011
7012         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7013                 tg3_set_loopback(dev, features);
7014
7015         return 0;
7016 }
7017
/* Record @new_mtu and adjust the jumbo-ring / TSO flags accordingly.
 * On 5780-class chips jumbo MTUs disable TSO (see tg3_fix_features());
 * note the deliberate ordering of netdev_update_features() relative to
 * the TSO_CAPABLE flag writes in each direction.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* Jumbo on 5780-class: recompute features (drops
			 * TSO via tg3_fix_features), then clear capability.
			 */
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* Back to standard MTU: restore capability before
			 * recomputing features.
			 */
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
7038
/* ndo_change_mtu hook: validate the new MTU, and if the device is up,
 * halt/restart the hardware around the change.  Returns 0 on success,
 * -EINVAL for an out-of-range MTU, or the tg3_restart_hw() error.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only if the hardware came back up */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
7077
/* Free all RX data buffers belonging to producer ring set @tpr.
 *
 * For per-vector ring sets (anything other than napi[0]'s) only the
 * entries between the consumer and producer indices hold buffers, so
 * only that window is walked.  For the primary set every slot in the
 * standard (and, if present, jumbo) ring is freed.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Primary ring set: free every slot */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips have no dedicated jumbo ring */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7111
7112 /* Initialize rx rings for packet processing.
7113  *
7114  * The chip has been shut down and the driver detached from
7115  * the networking, so no interrupts or new tx packets will
7116  * end up in the driver.  tp->{tx,}lock are held and thus
7117  * we may not sleep.
7118  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        /* Reset producer/consumer indices for both the standard and
         * jumbo rings.
         */
        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Only the first napi context's prodring carries the descriptor
         * setup below; any other per-vector set just clears its shadow
         * buffer arrays and is done.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class chips carry over-sized frames on the standard ring
         * when the MTU exceeds the standard Ethernet payload size.
         */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        /* No buffers at all is fatal; otherwise continue
                         * with a shrunk ring.
                         */
                        if (i == 0)
                                goto initfail;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        /* Jumbo ring descriptor invariants, mirroring the standard ring
         * setup above but with the jumbo flag and opaque ring id.
         */
        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
7214
7215 static void tg3_rx_prodring_fini(struct tg3 *tp,
7216                                  struct tg3_rx_prodring_set *tpr)
7217 {
7218         kfree(tpr->rx_std_buffers);
7219         tpr->rx_std_buffers = NULL;
7220         kfree(tpr->rx_jmb_buffers);
7221         tpr->rx_jmb_buffers = NULL;
7222         if (tpr->rx_std) {
7223                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7224                                   tpr->rx_std, tpr->rx_std_mapping);
7225                 tpr->rx_std = NULL;
7226         }
7227         if (tpr->rx_jmb) {
7228                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7229                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7230                 tpr->rx_jmb = NULL;
7231         }
7232 }
7233
7234 static int tg3_rx_prodring_init(struct tg3 *tp,
7235                                 struct tg3_rx_prodring_set *tpr)
7236 {
7237         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7238                                       GFP_KERNEL);
7239         if (!tpr->rx_std_buffers)
7240                 return -ENOMEM;
7241
7242         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7243                                          TG3_RX_STD_RING_BYTES(tp),
7244                                          &tpr->rx_std_mapping,
7245                                          GFP_KERNEL);
7246         if (!tpr->rx_std)
7247                 goto err_out;
7248
7249         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7250                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7251                                               GFP_KERNEL);
7252                 if (!tpr->rx_jmb_buffers)
7253                         goto err_out;
7254
7255                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7256                                                  TG3_RX_JMB_RING_BYTES(tp),
7257                                                  &tpr->rx_jmb_mapping,
7258                                                  GFP_KERNEL);
7259                 if (!tpr->rx_jmb)
7260                         goto err_out;
7261         }
7262
7263         return 0;
7264
7265 err_out:
7266         tg3_rx_prodring_fini(tp, tpr);
7267         return -ENOMEM;
7268 }
7269
7270 /* Free up pending packets in all rx/tx rings.
7271  *
7272  * The chip has been shut down and the driver detached from
7273  * the networking, so no interrupts or new tx packets will
7274  * end up in the driver.  tp->{tx,}lock is not held and we are not
7275  * in an interrupt context and thus may sleep.
7276  */
7277 static void tg3_free_rings(struct tg3 *tp)
7278 {
7279         int i, j;
7280
7281         for (j = 0; j < tp->irq_cnt; j++) {
7282                 struct tg3_napi *tnapi = &tp->napi[j];
7283
7284                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7285
7286                 if (!tnapi->tx_buffers)
7287                         continue;
7288
7289                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7290                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7291
7292                         if (!skb)
7293                                 continue;
7294
7295                         tg3_tx_skb_unmap(tnapi, i,
7296                                          skb_shinfo(skb)->nr_frags - 1);
7297
7298                         dev_kfree_skb_any(skb);
7299                 }
7300         }
7301         netdev_reset_queue(tp->dev);
7302 }
7303
7304 /* Initialize tx/rx rings for packet processing.
7305  *
7306  * The chip has been shut down and the driver detached from
7307  * the networking, so no interrupts or new tx packets will
7308  * end up in the driver.  tp->{tx,}lock are held and thus
7309  * we may not sleep.
7310  */
7311 static int tg3_init_rings(struct tg3 *tp)
7312 {
7313         int i;
7314
7315         /* Free up all the SKBs. */
7316         tg3_free_rings(tp);
7317
7318         for (i = 0; i < tp->irq_cnt; i++) {
7319                 struct tg3_napi *tnapi = &tp->napi[i];
7320
7321                 tnapi->last_tag = 0;
7322                 tnapi->last_irq_tag = 0;
7323                 tnapi->hw_status->status = 0;
7324                 tnapi->hw_status->status_tag = 0;
7325                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7326
7327                 tnapi->tx_prod = 0;
7328                 tnapi->tx_cons = 0;
7329                 if (tnapi->tx_ring)
7330                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7331
7332                 tnapi->rx_rcb_ptr = 0;
7333                 if (tnapi->rx_rcb)
7334                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7335
7336                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7337                         tg3_free_rings(tp);
7338                         return -ENOMEM;
7339                 }
7340         }
7341
7342         return 0;
7343 }
7344
7345 /*
7346  * Must not be invoked with interrupt sources disabled and
7347  * the hardware shutdown down.
7348  */
7349 static void tg3_free_consistent(struct tg3 *tp)
7350 {
7351         int i;
7352
7353         for (i = 0; i < tp->irq_cnt; i++) {
7354                 struct tg3_napi *tnapi = &tp->napi[i];
7355
7356                 if (tnapi->tx_ring) {
7357                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7358                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7359                         tnapi->tx_ring = NULL;
7360                 }
7361
7362                 kfree(tnapi->tx_buffers);
7363                 tnapi->tx_buffers = NULL;
7364
7365                 if (tnapi->rx_rcb) {
7366                         dma_free_coherent(&tp->pdev->dev,
7367                                           TG3_RX_RCB_RING_BYTES(tp),
7368                                           tnapi->rx_rcb,
7369                                           tnapi->rx_rcb_mapping);
7370                         tnapi->rx_rcb = NULL;
7371                 }
7372
7373                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7374
7375                 if (tnapi->hw_status) {
7376                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7377                                           tnapi->hw_status,
7378                                           tnapi->status_mapping);
7379                         tnapi->hw_status = NULL;
7380                 }
7381         }
7382
7383         if (tp->hw_stats) {
7384                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7385                                   tp->hw_stats, tp->stats_mapping);
7386                 tp->hw_stats = NULL;
7387         }
7388 }
7389
7390 /*
7391  * Must not be invoked with interrupt sources disabled and
7392  * the hardware shutdown down.  Can sleep.
7393  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        int i;

        /* Statistics block shared with the hardware via DMA. */
        tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
                                          sizeof(struct tg3_hw_stats),
                                          &tp->stats_mapping,
                                          GFP_KERNEL);
        if (!tp->hw_stats)
                goto err_out;

        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                /* Per-vector status block the chip writes into. */
                tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
                                                      TG3_HW_STATUS_SIZE,
                                                      &tnapi->status_mapping,
                                                      GFP_KERNEL);
                if (!tnapi->hw_status)
                        goto err_out;

                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;

                if (tg3_rx_prodring_init(tp, &tnapi->prodring))
                        goto err_out;

                /* If multivector TSS is enabled, vector 0 does not handle
                 * tx interrupts.  Don't allocate any resources for it.
                 */
                if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
                    (i && tg3_flag(tp, ENABLE_TSS))) {
                        tnapi->tx_buffers = kzalloc(
                                               sizeof(struct tg3_tx_ring_info) *
                                               TG3_TX_RING_SIZE, GFP_KERNEL);
                        if (!tnapi->tx_buffers)
                                goto err_out;

                        tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
                                                            TG3_TX_RING_BYTES,
                                                        &tnapi->tx_desc_mapping,
                                                            GFP_KERNEL);
                        if (!tnapi->tx_ring)
                                goto err_out;
                }

                /*
                 * When RSS is enabled, the status block format changes
                 * slightly.  The "rx_jumbo_consumer", "reserved",
                 * and "rx_mini_consumer" members get mapped to the
                 * other three rx return ring producer indexes.
                 */
                switch (i) {
                default:
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                        break;
                case 2:
                        tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
                        break;
                case 3:
                        tnapi->rx_rcb_prod_idx = &sblk->reserved;
                        break;
                case 4:
                        tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
                        break;
                }

                /*
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
                 */
                if (!i && tg3_flag(tp, ENABLE_RSS))
                        continue;

                /* RX return ring (completion ring) for this vector. */
                tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
                                                   TG3_RX_RCB_RING_BYTES(tp),
                                                   &tnapi->rx_rcb_mapping,
                                                   GFP_KERNEL);
                if (!tnapi->rx_rcb)
                        goto err_out;

                memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        }

        return 0;

err_out:
        /* Unwind every allocation made so far (pointers are NULL-safe). */
        tg3_free_consistent(tp);
        return -ENOMEM;
}
7487
7488 #define MAX_WAIT_CNT 1000
7489
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 on success (or when the block is one the chip does not
 * allow disabling, or when @silent suppresses the timeout error);
 * -ENODEV if the enable bit fails to clear in time.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
        unsigned int i;
        u32 val;

        if (tg3_flag(tp, 5705_PLUS)) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit and flush the posted write. */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        /* Poll up to MAX_WAIT_CNT times, 100us apart, for the bit to clear. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        /* On timeout, report failure unless the caller asked for silence. */
        if (i == MAX_WAIT_CNT && !silent) {
                dev_err(&tp->pdev->dev,
                        "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
                        ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
7535
/* Shut down all DMA/MAC engine blocks in dependency order and clear the
 * status blocks.  Accumulates errors from the individual block stops.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the MAC receiver first so no new frames enter the chip. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Stop the rx-side buffer/DMA blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Stop the tx-side blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Poll for the MAC transmitter to disable (100us steps). */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Finally stop the host coalescing, DMA, and memory blocks. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe every vector's status block so stale state is not reused. */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
7599
/* Save the PCI command register before chip reset; the GRC_MISC_CFG
 * core clock reset can clear the memory enable bit in this register
 * (see tg3_chip_reset), so it is restored afterwards by
 * tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7605
/* Restore PCI state after chip reset: re-enables indirect accesses,
 * restores the saved PCI command register, and fixes up bus-specific
 * (PCI-X / 5780-class MSI) settings that the reset clobbered.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Restore the command register saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* Legacy PCI: restore cache line size and latency timer. */
        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
7666
/* Perform a full GRC core-clock reset of the chip, then restore PCI
 * state, MAC mode, and re-probe ASF configuration from NVRAM.
 * Returns 0 on success or a negative error from the firmware poll.
 * tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Wait for any in-flight interrupt handlers to finish. */
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        /* This write triggers the actual core clock reset. */
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
                u16 val16;

                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                     &val16);
                val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
                           PCI_EXP_DEVCTL_NOSNOOP_EN);
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                      val16);

                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter, preserving its mode bits on
         * 5780-class chips.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        /* Chip-rev-specific workaround (undocumented register 0xc4). */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Re-establish the MAC port mode for serdes PHYs. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        /* Wait for on-chip firmware to come back up after the reset. */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        /* Chip-rev-specific workaround (undocumented register 0x7c00). */
        if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }

        return 0;
}
7910
7911 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7912                                                  struct rtnl_link_stats64 *);
7913 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7914                                                 struct tg3_ethtool_stats *);
7915
7916 /* tp->lock is held. */
7917 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7918 {
7919         int err;
7920
7921         tg3_stop_fw(tp);
7922
7923         tg3_write_sig_pre_reset(tp, kind);
7924
7925         tg3_abort_hw(tp, silent);
7926         err = tg3_chip_reset(tp);
7927
7928         __tg3_set_mac_addr(tp, 0);
7929
7930         tg3_write_sig_legacy(tp, kind);
7931         tg3_write_sig_post_reset(tp, kind);
7932
7933         if (tp->hw_stats) {
7934                 /* Save the stats across chip resets... */
7935                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
7936                 tg3_get_estats(tp, &tp->estats_prev);
7937
7938                 /* And make sure the next sample is new data */
7939                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7940         }
7941
7942         if (err)
7943                 return err;
7944
7945         return 0;
7946 }
7947
7948 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7949 {
7950         struct tg3 *tp = netdev_priv(dev);
7951         struct sockaddr *addr = p;
7952         int err = 0, skip_mac_1 = 0;
7953
7954         if (!is_valid_ether_addr(addr->sa_data))
7955                 return -EINVAL;
7956
7957         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7958
7959         if (!netif_running(dev))
7960                 return 0;
7961
7962         if (tg3_flag(tp, ENABLE_ASF)) {
7963                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7964
7965                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7966                 addr0_low = tr32(MAC_ADDR_0_LOW);
7967                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7968                 addr1_low = tr32(MAC_ADDR_1_LOW);
7969
7970                 /* Skip MAC addr 1 if ASF is using it. */
7971                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7972                     !(addr1_high == 0 && addr1_low == 0))
7973                         skip_mac_1 = 1;
7974         }
7975         spin_lock_bh(&tp->lock);
7976         __tg3_set_mac_addr(tp, skip_mac_1);
7977         spin_unlock_bh(&tp->lock);
7978
7979         return err;
7980 }
7981
7982 /* tp->lock is held. */
7983 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7984                            dma_addr_t mapping, u32 maxlen_flags,
7985                            u32 nic_addr)
7986 {
7987         tg3_write_mem(tp,
7988                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7989                       ((u64) mapping >> 32));
7990         tg3_write_mem(tp,
7991                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7992                       ((u64) mapping & 0xffffffff));
7993         tg3_write_mem(tp,
7994                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7995                        maxlen_flags);
7996
7997         if (!tg3_flag(tp, 5705_PLUS))
7998                 tg3_write_mem(tp,
7999                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8000                               nic_addr);
8001 }
8002
8003 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from an ethtool_coalesce request.
 *
 * Vector 0 uses the default HOSTCC_* registers; additional MSI-X
 * vectors use the per-vector *_VEC1 register banks, each 0x18 bytes
 * apart.  When TSS (TX multiqueue) or RSS (RX multiqueue) is enabled,
 * the corresponding default registers are zeroed because traffic is
 * steered to the per-vector blocks instead.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	/* Default (vector 0) TX coalescing: zeroed when TSS owns TX. */
	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	/* Default (vector 0) RX coalescing: zeroed when RSS owns RX. */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	/* Pre-5705 chips additionally support IRQ-context tick values
	 * and statistics-block coalescing.  Stats DMA is suspended
	 * (ticks = 0) while the link is down.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Per-vector blocks for the active extra IRQ vectors.  TX
	 * values are only written when TSS is on.
	 */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the blocks for the remaining (unused) vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
8072
/* tp->lock is held.
 *
 * Return all send and receive-return ring control blocks, mailboxes and
 * status blocks to their post-reset state, then re-program the ring
 * control blocks for the rings this driver actually uses.  The number
 * of hardware RCBs to walk depends on the chip family, hence the
 * per-ASIC limit calculations below.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks
	 * the vector) and clear the MSI stall-detection bookkeeping for
	 * vector 0.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, TX still flows through vector 0's
		 * producer mailbox.
		 */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-program the first TX and RX-return RCBs for vector 0. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Per-vector status blocks live at consecutive 8-byte slots
	 * starting at HOSTCC_STATBLCK_RING1.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8200
8201 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8202 {
8203         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8204
8205         if (!tg3_flag(tp, 5750_PLUS) ||
8206             tg3_flag(tp, 5780_CLASS) ||
8207             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8208             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8209             tg3_flag(tp, 57765_PLUS))
8210                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8211         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8212                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8213                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8214         else
8215                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8216
8217         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8218         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8219
8220         val = min(nic_rep_thresh, host_rep_thresh);
8221         tw32(RCVBDI_STD_THRESH, val);
8222
8223         if (tg3_flag(tp, 57765_PLUS))
8224                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8225
8226         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8227                 return;
8228
8229         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8230
8231         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8232
8233         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8234         tw32(RCVBDI_JUMBO_THRESH, val);
8235
8236         if (tg3_flag(tp, 57765_PLUS))
8237                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8238 }
8239
8240 /* tp->lock is held. */
8241 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8242 {
8243         u32 val, rdmac_mode;
8244         int i, err, limit;
8245         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8246
8247         tg3_disable_ints(tp);
8248
8249         tg3_stop_fw(tp);
8250
8251         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8252
8253         if (tg3_flag(tp, INIT_COMPLETE))
8254                 tg3_abort_hw(tp, 1);
8255
8256         /* Enable MAC control of LPI */
8257         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8258                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8259                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8260                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8261
8262                 tw32_f(TG3_CPMU_EEE_CTRL,
8263                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8264
8265                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8266                       TG3_CPMU_EEEMD_LPI_IN_TX |
8267                       TG3_CPMU_EEEMD_LPI_IN_RX |
8268                       TG3_CPMU_EEEMD_EEE_ENABLE;
8269
8270                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8271                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8272
8273                 if (tg3_flag(tp, ENABLE_APE))
8274                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8275
8276                 tw32_f(TG3_CPMU_EEE_MODE, val);
8277
8278                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8279                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8280                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8281
8282                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8283                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8284                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8285         }
8286
8287         if (reset_phy)
8288                 tg3_phy_reset(tp);
8289
8290         err = tg3_chip_reset(tp);
8291         if (err)
8292                 return err;
8293
8294         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8295
8296         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8297                 val = tr32(TG3_CPMU_CTRL);
8298                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8299                 tw32(TG3_CPMU_CTRL, val);
8300
8301                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8302                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8303                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8304                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8305
8306                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8307                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8308                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8309                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8310
8311                 val = tr32(TG3_CPMU_HST_ACC);
8312                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8313                 val |= CPMU_HST_ACC_MACCLK_6_25;
8314                 tw32(TG3_CPMU_HST_ACC, val);
8315         }
8316
8317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8318                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8319                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8320                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8321                 tw32(PCIE_PWR_MGMT_THRESH, val);
8322
8323                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8324                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8325
8326                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8327
8328                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8329                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8330         }
8331
8332         if (tg3_flag(tp, L1PLLPD_EN)) {
8333                 u32 grc_mode = tr32(GRC_MODE);
8334
8335                 /* Access the lower 1K of PL PCIE block registers. */
8336                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8338
8339                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8340                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8341                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8342
8343                 tw32(GRC_MODE, grc_mode);
8344         }
8345
8346         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8347                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8348                         u32 grc_mode = tr32(GRC_MODE);
8349
8350                         /* Access the lower 1K of PL PCIE block registers. */
8351                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8353
8354                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8355                                    TG3_PCIE_PL_LO_PHYCTL5);
8356                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8357                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8358
8359                         tw32(GRC_MODE, grc_mode);
8360                 }
8361
8362                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8363                         u32 grc_mode = tr32(GRC_MODE);
8364
8365                         /* Access the lower 1K of DL PCIE block registers. */
8366                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8367                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8368
8369                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8370                                    TG3_PCIE_DL_LO_FTSMAX);
8371                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8372                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8373                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8374
8375                         tw32(GRC_MODE, grc_mode);
8376                 }
8377
8378                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8379                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8380                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8381                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8382         }
8383
8384         /* This works around an issue with Athlon chipsets on
8385          * B3 tigon3 silicon.  This bit has no effect on any
8386          * other revision.  But do not set this on PCI Express
8387          * chips and don't even touch the clocks if the CPMU is present.
8388          */
8389         if (!tg3_flag(tp, CPMU_PRESENT)) {
8390                 if (!tg3_flag(tp, PCI_EXPRESS))
8391                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8392                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8393         }
8394
8395         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8396             tg3_flag(tp, PCIX_MODE)) {
8397                 val = tr32(TG3PCI_PCISTATE);
8398                 val |= PCISTATE_RETRY_SAME_DMA;
8399                 tw32(TG3PCI_PCISTATE, val);
8400         }
8401
8402         if (tg3_flag(tp, ENABLE_APE)) {
8403                 /* Allow reads and writes to the
8404                  * APE register and memory space.
8405                  */
8406                 val = tr32(TG3PCI_PCISTATE);
8407                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8408                        PCISTATE_ALLOW_APE_SHMEM_WR |
8409                        PCISTATE_ALLOW_APE_PSPACE_WR;
8410                 tw32(TG3PCI_PCISTATE, val);
8411         }
8412
8413         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8414                 /* Enable some hw fixes.  */
8415                 val = tr32(TG3PCI_MSI_DATA);
8416                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8417                 tw32(TG3PCI_MSI_DATA, val);
8418         }
8419
8420         /* Descriptor ring init may make accesses to the
8421          * NIC SRAM area to setup the TX descriptors, so we
8422          * can only do this after the hardware has been
8423          * successfully reset.
8424          */
8425         err = tg3_init_rings(tp);
8426         if (err)
8427                 return err;
8428
8429         if (tg3_flag(tp, 57765_PLUS)) {
8430                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8431                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8432                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8433                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8434                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8435                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8436                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8437                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8438         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8439                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8440                 /* This value is determined during the probe time DMA
8441                  * engine test, tg3_test_dma.
8442                  */
8443                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8444         }
8445
8446         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8447                           GRC_MODE_4X_NIC_SEND_RINGS |
8448                           GRC_MODE_NO_TX_PHDR_CSUM |
8449                           GRC_MODE_NO_RX_PHDR_CSUM);
8450         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8451
8452         /* Pseudo-header checksum is done by hardware logic and not
8453          * the offload processers, so make the chip do the pseudo-
8454          * header checksums on receive.  For transmit it is more
8455          * convenient to do the pseudo-header checksum in software
8456          * as Linux does that on transmit for us in all cases.
8457          */
8458         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8459
8460         tw32(GRC_MODE,
8461              tp->grc_mode |
8462              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8463
8464         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8465         val = tr32(GRC_MISC_CFG);
8466         val &= ~0xff;
8467         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8468         tw32(GRC_MISC_CFG, val);
8469
8470         /* Initialize MBUF/DESC pool. */
8471         if (tg3_flag(tp, 5750_PLUS)) {
8472                 /* Do nothing.  */
8473         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8474                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8475                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8476                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8477                 else
8478                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8479                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8480                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8481         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8482                 int fw_len;
8483
8484                 fw_len = tp->fw_len;
8485                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8486                 tw32(BUFMGR_MB_POOL_ADDR,
8487                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8488                 tw32(BUFMGR_MB_POOL_SIZE,
8489                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8490         }
8491
8492         if (tp->dev->mtu <= ETH_DATA_LEN) {
8493                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8494                      tp->bufmgr_config.mbuf_read_dma_low_water);
8495                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8496                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8497                 tw32(BUFMGR_MB_HIGH_WATER,
8498                      tp->bufmgr_config.mbuf_high_water);
8499         } else {
8500                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8501                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8502                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8503                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8504                 tw32(BUFMGR_MB_HIGH_WATER,
8505                      tp->bufmgr_config.mbuf_high_water_jumbo);
8506         }
8507         tw32(BUFMGR_DMA_LOW_WATER,
8508              tp->bufmgr_config.dma_low_water);
8509         tw32(BUFMGR_DMA_HIGH_WATER,
8510              tp->bufmgr_config.dma_high_water);
8511
8512         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8514                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8516             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8517             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8518                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8519         tw32(BUFMGR_MODE, val);
8520         for (i = 0; i < 2000; i++) {
8521                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8522                         break;
8523                 udelay(10);
8524         }
8525         if (i >= 2000) {
8526                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8527                 return -ENODEV;
8528         }
8529
8530         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8531                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8532
8533         tg3_setup_rxbd_thresholds(tp);
8534
8535         /* Initialize TG3_BDINFO's at:
8536          *  RCVDBDI_STD_BD:     standard eth size rx ring
8537          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8538          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8539          *
8540          * like so:
8541          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8542          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8543          *                              ring attribute flags
8544          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8545          *
8546          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8547          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8548          *
8549          * The size of each ring is fixed in the firmware, but the location is
8550          * configurable.
8551          */
8552         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8553              ((u64) tpr->rx_std_mapping >> 32));
8554         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8555              ((u64) tpr->rx_std_mapping & 0xffffffff));
8556         if (!tg3_flag(tp, 5717_PLUS))
8557                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8558                      NIC_SRAM_RX_BUFFER_DESC);
8559
8560         /* Disable the mini ring */
8561         if (!tg3_flag(tp, 5705_PLUS))
8562                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8563                      BDINFO_FLAGS_DISABLED);
8564
8565         /* Program the jumbo buffer descriptor ring control
8566          * blocks on those devices that have them.
8567          */
8568         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8569             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8570
8571                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8572                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8573                              ((u64) tpr->rx_jmb_mapping >> 32));
8574                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8575                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8576                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8577                               BDINFO_FLAGS_MAXLEN_SHIFT;
8578                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8579                              val | BDINFO_FLAGS_USE_EXT_RECV);
8580                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8581                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8582                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8583                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8584                 } else {
8585                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8586                              BDINFO_FLAGS_DISABLED);
8587                 }
8588
8589                 if (tg3_flag(tp, 57765_PLUS)) {
8590                         val = TG3_RX_STD_RING_SIZE(tp);
8591                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8592                         val |= (TG3_RX_STD_DMA_SZ << 2);
8593                 } else
8594                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8595         } else
8596                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8597
8598         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8599
8600         tpr->rx_std_prod_idx = tp->rx_pending;
8601         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8602
8603         tpr->rx_jmb_prod_idx =
8604                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8605         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8606
8607         tg3_rings_reset(tp);
8608
8609         /* Initialize MAC address and backoff seed. */
8610         __tg3_set_mac_addr(tp, 0);
8611
8612         /* MTU + ethernet header + FCS + optional VLAN tag */
8613         tw32(MAC_RX_MTU_SIZE,
8614              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8615
8616         /* The slot time is changed by tg3_setup_phy if we
8617          * run at gigabit with half duplex.
8618          */
8619         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8620               (6 << TX_LENGTHS_IPG_SHIFT) |
8621               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8622
8623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8624                 val |= tr32(MAC_TX_LENGTHS) &
8625                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8626                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8627
8628         tw32(MAC_TX_LENGTHS, val);
8629
8630         /* Receive rules. */
8631         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8632         tw32(RCVLPC_CONFIG, 0x0181);
8633
8634         /* Calculate RDMAC_MODE setting early, we need it to determine
8635          * the RCVLPC_STATE_ENABLE mask.
8636          */
8637         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8638                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8639                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8640                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8641                       RDMAC_MODE_LNGREAD_ENAB);
8642
8643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8644                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8645
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8647             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8648             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8649                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8650                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8651                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8652
8653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8654             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8655                 if (tg3_flag(tp, TSO_CAPABLE) &&
8656                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8657                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8658                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8659                            !tg3_flag(tp, IS_5788)) {
8660                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8661                 }
8662         }
8663
8664         if (tg3_flag(tp, PCI_EXPRESS))
8665                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8666
8667         if (tg3_flag(tp, HW_TSO_1) ||
8668             tg3_flag(tp, HW_TSO_2) ||
8669             tg3_flag(tp, HW_TSO_3))
8670                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8671
8672         if (tg3_flag(tp, 57765_PLUS) ||
8673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8675                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8676
8677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8678                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8679
8680         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8684             tg3_flag(tp, 57765_PLUS)) {
8685                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8686                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8687                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8688                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8689                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8690                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8691                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8692                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8693                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8694                 }
8695                 tw32(TG3_RDMA_RSRVCTRL_REG,
8696                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8697         }
8698
8699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8701                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8702                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8703                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8704                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8705         }
8706
8707         /* Receive/send statistics. */
8708         if (tg3_flag(tp, 5750_PLUS)) {
8709                 val = tr32(RCVLPC_STATS_ENABLE);
8710                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8711                 tw32(RCVLPC_STATS_ENABLE, val);
8712         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8713                    tg3_flag(tp, TSO_CAPABLE)) {
8714                 val = tr32(RCVLPC_STATS_ENABLE);
8715                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8716                 tw32(RCVLPC_STATS_ENABLE, val);
8717         } else {
8718                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8719         }
8720         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8721         tw32(SNDDATAI_STATSENAB, 0xffffff);
8722         tw32(SNDDATAI_STATSCTRL,
8723              (SNDDATAI_SCTRL_ENABLE |
8724               SNDDATAI_SCTRL_FASTUPD));
8725
8726         /* Setup host coalescing engine. */
8727         tw32(HOSTCC_MODE, 0);
8728         for (i = 0; i < 2000; i++) {
8729                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8730                         break;
8731                 udelay(10);
8732         }
8733
8734         __tg3_set_coalesce(tp, &tp->coal);
8735
8736         if (!tg3_flag(tp, 5705_PLUS)) {
8737                 /* Status/statistics block address.  See tg3_timer,
8738                  * the tg3_periodic_fetch_stats call there, and
8739                  * tg3_get_stats to see how this works for 5705/5750 chips.
8740                  */
8741                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8742                      ((u64) tp->stats_mapping >> 32));
8743                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8744                      ((u64) tp->stats_mapping & 0xffffffff));
8745                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8746
8747                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8748
8749                 /* Clear statistics and status block memory areas */
8750                 for (i = NIC_SRAM_STATS_BLK;
8751                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8752                      i += sizeof(u32)) {
8753                         tg3_write_mem(tp, i, 0);
8754                         udelay(40);
8755                 }
8756         }
8757
8758         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8759
8760         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8761         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8762         if (!tg3_flag(tp, 5705_PLUS))
8763                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8764
8765         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8766                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8767                 /* reset to prevent losing 1st rx packet intermittently */
8768                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8769                 udelay(10);
8770         }
8771
8772         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8773                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8774                         MAC_MODE_FHDE_ENABLE;
8775         if (tg3_flag(tp, ENABLE_APE))
8776                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8777         if (!tg3_flag(tp, 5705_PLUS) &&
8778             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8779             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8780                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8781         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8782         udelay(40);
8783
8784         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8785          * If TG3_FLAG_IS_NIC is zero, we should read the
8786          * register to preserve the GPIO settings for LOMs. The GPIOs,
8787          * whether used as inputs or outputs, are set by boot code after
8788          * reset.
8789          */
8790         if (!tg3_flag(tp, IS_NIC)) {
8791                 u32 gpio_mask;
8792
8793                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8794                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8795                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8796
8797                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8798                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8799                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8800
8801                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8802                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8803
8804                 tp->grc_local_ctrl &= ~gpio_mask;
8805                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8806
8807                 /* GPIO1 must be driven high for eeprom write protect */
8808                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8809                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8810                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8811         }
8812         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8813         udelay(100);
8814
8815         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8816                 val = tr32(MSGINT_MODE);
8817                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8818                 if (!tg3_flag(tp, 1SHOT_MSI))
8819                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8820                 tw32(MSGINT_MODE, val);
8821         }
8822
8823         if (!tg3_flag(tp, 5705_PLUS)) {
8824                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8825                 udelay(40);
8826         }
8827
8828         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8829                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8830                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8831                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8832                WDMAC_MODE_LNGREAD_ENAB);
8833
8834         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8835             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8836                 if (tg3_flag(tp, TSO_CAPABLE) &&
8837                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8838                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8839                         /* nothing */
8840                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8841                            !tg3_flag(tp, IS_5788)) {
8842                         val |= WDMAC_MODE_RX_ACCEL;
8843                 }
8844         }
8845
8846         /* Enable host coalescing bug fix */
8847         if (tg3_flag(tp, 5755_PLUS))
8848                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8849
8850         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8851                 val |= WDMAC_MODE_BURST_ALL_DATA;
8852
8853         tw32_f(WDMAC_MODE, val);
8854         udelay(40);
8855
8856         if (tg3_flag(tp, PCIX_MODE)) {
8857                 u16 pcix_cmd;
8858
8859                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8860                                      &pcix_cmd);
8861                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8862                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8863                         pcix_cmd |= PCI_X_CMD_READ_2K;
8864                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8865                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8866                         pcix_cmd |= PCI_X_CMD_READ_2K;
8867                 }
8868                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8869                                       pcix_cmd);
8870         }
8871
8872         tw32_f(RDMAC_MODE, rdmac_mode);
8873         udelay(40);
8874
8875         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8876         if (!tg3_flag(tp, 5705_PLUS))
8877                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8878
8879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8880                 tw32(SNDDATAC_MODE,
8881                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8882         else
8883                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8884
8885         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8886         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8887         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8888         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8889                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8890         tw32(RCVDBDI_MODE, val);
8891         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8892         if (tg3_flag(tp, HW_TSO_1) ||
8893             tg3_flag(tp, HW_TSO_2) ||
8894             tg3_flag(tp, HW_TSO_3))
8895                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8896         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8897         if (tg3_flag(tp, ENABLE_TSS))
8898                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8899         tw32(SNDBDI_MODE, val);
8900         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8901
8902         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8903                 err = tg3_load_5701_a0_firmware_fix(tp);
8904                 if (err)
8905                         return err;
8906         }
8907
8908         if (tg3_flag(tp, TSO_CAPABLE)) {
8909                 err = tg3_load_tso_firmware(tp);
8910                 if (err)
8911                         return err;
8912         }
8913
8914         tp->tx_mode = TX_MODE_ENABLE;
8915
8916         if (tg3_flag(tp, 5755_PLUS) ||
8917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8918                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8919
8920         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8921                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8922                 tp->tx_mode &= ~val;
8923                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8924         }
8925
8926         tw32_f(MAC_TX_MODE, tp->tx_mode);
8927         udelay(100);
8928
8929         if (tg3_flag(tp, ENABLE_RSS)) {
8930                 int i = 0;
8931                 u32 reg = MAC_RSS_INDIR_TBL_0;
8932
8933                 if (tp->irq_cnt == 2) {
8934                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8935                                 tw32(reg, 0x0);
8936                                 reg += 4;
8937                         }
8938                 } else {
8939                         u32 val;
8940
8941                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8942                                 val = i % (tp->irq_cnt - 1);
8943                                 i++;
8944                                 for (; i % 8; i++) {
8945                                         val <<= 4;
8946                                         val |= (i % (tp->irq_cnt - 1));
8947                                 }
8948                                 tw32(reg, val);
8949                                 reg += 4;
8950                         }
8951                 }
8952
8953                 /* Setup the "secret" hash key. */
8954                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8955                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8956                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8957                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8958                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8959                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8960                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8961                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8962                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8963                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8964         }
8965
8966         tp->rx_mode = RX_MODE_ENABLE;
8967         if (tg3_flag(tp, 5755_PLUS))
8968                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8969
8970         if (tg3_flag(tp, ENABLE_RSS))
8971                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8972                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8973                                RX_MODE_RSS_IPV6_HASH_EN |
8974                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8975                                RX_MODE_RSS_IPV4_HASH_EN |
8976                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8977
8978         tw32_f(MAC_RX_MODE, tp->rx_mode);
8979         udelay(10);
8980
8981         tw32(MAC_LED_CTRL, tp->led_ctrl);
8982
8983         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8984         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8985                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8986                 udelay(10);
8987         }
8988         tw32_f(MAC_RX_MODE, tp->rx_mode);
8989         udelay(10);
8990
8991         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8992                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8993                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8994                         /* Set drive transmission level to 1.2V  */
8995                         /* only if the signal pre-emphasis bit is not set  */
8996                         val = tr32(MAC_SERDES_CFG);
8997                         val &= 0xfffff000;
8998                         val |= 0x880;
8999                         tw32(MAC_SERDES_CFG, val);
9000                 }
9001                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9002                         tw32(MAC_SERDES_CFG, 0x616000);
9003         }
9004
9005         /* Prevent chip from dropping frames when flow control
9006          * is enabled.
9007          */
9008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9009                 val = 1;
9010         else
9011                 val = 2;
9012         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9013
9014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9015             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9016                 /* Use hardware link auto-negotiation */
9017                 tg3_flag_set(tp, HW_AUTONEG);
9018         }
9019
9020         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9022                 u32 tmp;
9023
9024                 tmp = tr32(SERDES_RX_CTRL);
9025                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9026                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9027                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9028                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9029         }
9030
9031         if (!tg3_flag(tp, USE_PHYLIB)) {
9032                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9033                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9034                         tp->link_config.speed = tp->link_config.orig_speed;
9035                         tp->link_config.duplex = tp->link_config.orig_duplex;
9036                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9037                 }
9038
9039                 err = tg3_setup_phy(tp, 0);
9040                 if (err)
9041                         return err;
9042
9043                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9045                         u32 tmp;
9046
9047                         /* Clear CRC stats. */
9048                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9049                                 tg3_writephy(tp, MII_TG3_TEST1,
9050                                              tmp | MII_TG3_TEST1_CRC_EN);
9051                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9052                         }
9053                 }
9054         }
9055
9056         __tg3_set_rx_mode(tp->dev);
9057
9058         /* Initialize receive rules. */
9059         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9060         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9061         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9062         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9063
9064         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9065                 limit = 8;
9066         else
9067                 limit = 16;
9068         if (tg3_flag(tp, ENABLE_ASF))
9069                 limit -= 4;
9070         switch (limit) {
9071         case 16:
9072                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9073         case 15:
9074                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9075         case 14:
9076                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9077         case 13:
9078                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9079         case 12:
9080                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9081         case 11:
9082                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9083         case 10:
9084                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9085         case 9:
9086                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9087         case 8:
9088                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9089         case 7:
9090                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9091         case 6:
9092                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9093         case 5:
9094                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9095         case 4:
9096                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9097         case 3:
9098                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9099         case 2:
9100         case 1:
9101
9102         default:
9103                 break;
9104         }
9105
9106         if (tg3_flag(tp, ENABLE_APE))
9107                 /* Write our heartbeat update interval to APE. */
9108                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9109                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9110
9111         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9112
9113         return 0;
9114 }
9115
9116 /* Called at device open time to get the chip ready for
9117  * packet processing.  Invoked with tp->lock held.
9118  */
9119 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9120 {
9121         tg3_switch_clocks(tp);
9122
9123         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9124
9125         return tg3_reset_hw(tp, reset_phy);
9126 }
9127
/* Accumulate the 32-bit hardware counter register REG into the 64-bit
 * software statistic PSTAT (a high/low u32 pair).  If adding the new
 * value wraps ->low (detected by the unsigned comparison), carry one
 * into ->high.  REG is read exactly once.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
9134
/* Fold the chip's 32-bit MAC statistics registers into the 64-bit
 * software copies in tp->hw_stats.  Called once per second from
 * tg3_timer() (under tp->lock) on 5705_PLUS chips.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        /* Skip the hardware reads while the link is down. */
        if (!netif_carrier_ok(tp->dev))
                return;

        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                /* On the 5717 family and 5719/5720 A0 revisions the mbuf
                 * low-watermark attention bit stands in for the discards
                 * counter: ack the bit and count a single discard event.
                 */
                u32 val = tr32(HOSTCC_FLOW_ATTN);
                val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
                if (val) {
                        tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
                        sp->rx_discards.low += val;
                        if (sp->rx_discards.low < val)
                                sp->rx_discards.high += 1;
                }
                /* Mirror the event count so both stats stay in step. */
                sp->mbuf_lwm_thresh_hit = sp->rx_discards;
        }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9189
9190 static void tg3_chk_missed_msi(struct tg3 *tp)
9191 {
9192         u32 i;
9193
9194         for (i = 0; i < tp->irq_cnt; i++) {
9195                 struct tg3_napi *tnapi = &tp->napi[i];
9196
9197                 if (tg3_has_work(tnapi)) {
9198                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9199                             tnapi->last_tx_cons == tnapi->tx_cons) {
9200                                 if (tnapi->chk_msi_cnt < 1) {
9201                                         tnapi->chk_msi_cnt++;
9202                                         return;
9203                                 }
9204                                 tg3_msi(0, tnapi);
9205                         }
9206                 }
9207                 tnapi->chk_msi_cnt = 0;
9208                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9209                 tnapi->last_tx_cons = tnapi->tx_cons;
9210         }
9211 }
9212
/* Driver watchdog timer.  Fires every tp->timer_offset jiffies and,
 * under tp->lock: checks for missed MSIs on affected chips, works
 * around the racy non-tagged status-block protocol, runs the
 * once-per-second link/statistics maintenance, and sends the ASF
 * firmware heartbeat.  Always re-arms itself before returning.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Don't touch the hardware while an irq sync or reset task is
         * in flight; just re-arm and try again next tick.
         */
        if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
                goto restart_timer;

        spin_lock(&tp->lock);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                tg3_chk_missed_msi(tp);

        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
                }

                /* Write DMA engine disabled itself -- schedule a full
                 * chip reset to recover.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        spin_unlock(&tp->lock);
                        tg3_reset_task_schedule(tp);
                        goto restart_timer;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);

                /* Countdown before (re)enabling EEE low-power idle. */
                if (tp->setlpicnt && !--tp->setlpicnt)
                        tg3_phy_eee_enable(tp);

                if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        /* Poll MAC_STATUS for a link change instead of
                         * relying on a link-change interrupt.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link up but state changed, or link down with
                         * sync/signal present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (!netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Bounce the MAC port mode before the
                                 * renegotiation.
                                 */
                                if (!tp->serdes_counter) {
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                                      TG3_FW_UPDATE_TIMEOUT_SEC);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
9339
9340 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9341 {
9342         irq_handler_t fn;
9343         unsigned long flags;
9344         char *name;
9345         struct tg3_napi *tnapi = &tp->napi[irq_num];
9346
9347         if (tp->irq_cnt == 1)
9348                 name = tp->dev->name;
9349         else {
9350                 name = &tnapi->irq_lbl[0];
9351                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9352                 name[IFNAMSIZ-1] = 0;
9353         }
9354
9355         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9356                 fn = tg3_msi;
9357                 if (tg3_flag(tp, 1SHOT_MSI))
9358                         fn = tg3_msi_1shot;
9359                 flags = 0;
9360         } else {
9361                 fn = tg3_interrupt;
9362                 if (tg3_flag(tp, TAGGED_STATUS))
9363                         fn = tg3_interrupt_tagged;
9364                 flags = IRQF_SHARED;
9365         }
9366
9367         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9368 }
9369
/* Verify that the device can actually deliver an interrupt.
 *
 * Temporarily swaps the normal handler for tg3_test_isr, forces a
 * host-coalescing "now" event, and polls for up to ~50ms for evidence
 * that the interrupt arrived.  The normal handler is re-registered
 * before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if none was seen, or a
 * negative errno if (re)registering the IRQ fails.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	/* NOTE(review): returning here leaves one-shot mode disabled and no
	 * handler registered on this vector -- confirm callers treat this
	 * error as fatal for the interface.
	 */
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so an interrupt fires immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * both indicate the test ISR ran.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Acknowledge any pending status update so the next event
		 * can be generated.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal production interrupt handler. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9443
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting masked, because a
 * failing MSI cycle may terminate with Master Abort.  If the test
 * shows no interrupt delivery (-EIO), falls back to legacy INTx and
 * resets the chip, since the aborted MSI cycle may have left the
 * hardware in an inconsistent state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original command word (re-enables SERR if set). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	/* Back to the legacy line interrupt for vector 0. */
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Re-init failed; drop the IRQ we just requested. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9504
9505 static int tg3_request_firmware(struct tg3 *tp)
9506 {
9507         const __be32 *fw_data;
9508
9509         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9510                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9511                            tp->fw_needed);
9512                 return -ENOENT;
9513         }
9514
9515         fw_data = (void *)tp->fw->data;
9516
9517         /* Firmware blob starts with version numbers, followed by
9518          * start address and _full_ length including BSS sections
9519          * (which must be longer than the actual data, of course
9520          */
9521
9522         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9523         if (tp->fw_len < (tp->fw->size - 12)) {
9524                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9525                            tp->fw_len, tp->fw_needed);
9526                 release_firmware(tp->fw);
9527                 tp->fw = NULL;
9528                 return -EINVAL;
9529         }
9530
9531         /* We no longer need firmware; we have it. */
9532         tp->fw_needed = NULL;
9533         return 0;
9534 }
9535
/* Try to switch the device to MSI-X with one RX ring per online CPU.
 *
 * Returns true if MSI-X was enabled (tp->irq_cnt and the per-vector
 * napi[].irq_vec fields are populated and the real queue counts are
 * set), false to let the caller fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available; retry with
		 * exactly the number the PCI core said it can provide.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	/* Copies all irq_max entries, not just irq_cnt; entries beyond
	 * irq_cnt keep the zero vector written above.
	 */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	/* Vector 0 is link/etc only, so RX rings = irq_cnt - 1. */
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support multiple TX rings (TSS). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9590
/* Choose and configure the interrupt mode for the device.
 *
 * Prefers MSI-X, then MSI, then legacy INTx.  Sets the USING_MSI/
 * USING_MSIX flags and programs MSGINT_MODE accordingly; in the
 * fallback (defcfg) case, collapses to a single vector on the legacy
 * PCI line with one TX and one RX queue.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* Without one-shot MSI support, explicitly disable it. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Anything short of multi-vector MSI-X uses exactly one vector. */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9624
9625 static void tg3_ints_fini(struct tg3 *tp)
9626 {
9627         if (tg3_flag(tp, USING_MSIX))
9628                 pci_disable_msix(tp->pdev);
9629         else if (tg3_flag(tp, USING_MSI))
9630                 pci_disable_msi(tp->pdev);
9631         tg3_flag_clear(tp, USING_MSI);
9632         tg3_flag_clear(tp, USING_MSIX);
9633         tg3_flag_clear(tp, ENABLE_RSS);
9634         tg3_flag_clear(tp, ENABLE_TSS);
9635 }
9636
/* ndo_open hook: bring the interface up.
 *
 * Loads firmware if the chip needs it, powers the device up, sets up
 * interrupt vectors and NAPI contexts, allocates the DMA rings,
 * programs the hardware, validates MSI delivery, then starts the
 * service timer and TX queues.  On any failure the partially built
 * state is unwound and the device is powered back down.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot come up without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips still work, just without TSO. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Unwind the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips need only a 1 Hz service timer;
		 * the rest (and 5717/57765 even with tagged status) run
		 * it at 10 Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires at half the service-timer rate. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		/* Older chips advertise one-shot MSI via a PCIe config bit. */
		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
9787
/* ndo_close hook: bring the interface down.
 *
 * Mirrors tg3_open() in reverse: stops NAPI, the service timer and TX
 * queues, halts the hardware, releases IRQ vectors, frees ring/DMA
 * memory, and powers the chip down.  Statistics snapshots are zeroed
 * so counters restart from scratch on the next open.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release every vector requested in tg3_open(). */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9833
9834 static inline u64 get_stat64(tg3_stat64_t *val)
9835 {
9836        return ((u64)val->high << 32) | ((u64)val->low);
9837 }
9838
/* Return the cumulative RX CRC error count.
 *
 * Non-SerDes 5700/5701 devices latch CRC errors in a PHY test
 * register rather than the MAC statistics block, so the PHY is polled
 * (under tp->lock) and the reading accumulated into
 * tp->phy_crc_errors.  All other devices report the hardware
 * rx_fcs_errors counter directly.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then read (and clear) the
			 * counter register.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9864
/* Add the live hardware value of a statistic to its snapshot from the
 * previous open/close cycle.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with the full ethtool statistics set: the saved
 * pre-open snapshot plus the current hardware counters.  Returns the
 * previous snapshot untouched if the hardware stats block is not
 * mapped (device never brought up).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
					       struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
9958
/* ndo_get_stats64 hook: fill @stats from the saved pre-open snapshot
 * plus the current hardware counters, mapping the chip's statistics
 * onto the generic rtnl_link_stats64 fields.  Returns the previous
 * snapshot untouched if the hardware stats block is not mapped.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Driver-maintained software counters. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
10021
10022 static inline u32 calc_crc(unsigned char *buf, int len)
10023 {
10024         u32 reg;
10025         u32 tmp;
10026         int j, k;
10027
10028         reg = 0xffffffff;
10029
10030         for (j = 0; j < len; j++) {
10031                 reg ^= buf[j];
10032
10033                 for (k = 0; k < 8; k++) {
10034                         tmp = reg & 0x01;
10035
10036                         reg >>= 1;
10037
10038                         if (tmp)
10039                                 reg ^= 0xedb88320;
10040                 }
10041         }
10042
10043         return ~reg;
10044 }
10045
10046 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10047 {
10048         /* accept or reject all multicast frames */
10049         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10050         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10051         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10052         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10053 }
10054
/* Program the MAC RX filters from dev->flags and the multicast list.
 * Called with tg3_full_lock() held (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			/* Low 7 bits of the inverted CRC select one of
			 * 128 hash bits...
			 */
			bit = ~crc & 0x7f;
			/* ...split into a register index (top 2 bits)... */
			regidx = (bit & 0x60) >> 5;
			/* ...and a bit position within that register. */
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
10108
/* ndo_set_rx_mode hook: apply the RX filter configuration under the
 * full device lock.  A stopped interface is left untouched; the
 * filters are reprogrammed on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
10120
/* ethtool get_regs_len hook: size of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10125
/* ethtool get_regs hook: dump the legacy register block into @_p.
 * The buffer is zeroed first; the dump is skipped entirely while the
 * PHY is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Device powered down; leave the zeroed buffer as-is. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10144
/* ethtool get_eeprom_len hook: report the NVRAM size recorded in tp. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10151
/* ethtool get_eeprom hook: copy @eeprom->len bytes of NVRAM starting
 * at @eeprom->offset into @data.
 *
 * NVRAM is read in 4-byte words, so an unaligned head or tail is
 * handled by reading the containing word and copying only the needed
 * slice.  eeprom->len is updated to the number of bytes actually
 * copied, including on partial failure mid-transfer.
 *
 * Returns 0 on success, -EINVAL if there is no NVRAM, -EAGAIN while
 * the device is in low-power state, or the NVRAM read error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10214
10215 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10216
10217 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10218 {
10219         struct tg3 *tp = netdev_priv(dev);
10220         int ret;
10221         u32 offset, len, b_offset, odd_len;
10222         u8 *buf;
10223         __be32 start, end;
10224
10225         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10226                 return -EAGAIN;
10227
10228         if (tg3_flag(tp, NO_NVRAM) ||
10229             eeprom->magic != TG3_EEPROM_MAGIC)
10230                 return -EINVAL;
10231
10232         offset = eeprom->offset;
10233         len = eeprom->len;
10234
10235         if ((b_offset = (offset & 3))) {
10236                 /* adjustments to start on required 4 byte boundary */
10237                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10238                 if (ret)
10239                         return ret;
10240                 len += b_offset;
10241                 offset &= ~3;
10242                 if (len < 4)
10243                         len = 4;
10244         }
10245
10246         odd_len = 0;
10247         if (len & 3) {
10248                 /* adjustments to end on required 4 byte boundary */
10249                 odd_len = 1;
10250                 len = (len + 3) & ~3;
10251                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10252                 if (ret)
10253                         return ret;
10254         }
10255
10256         buf = data;
10257         if (b_offset || odd_len) {
10258                 buf = kmalloc(len, GFP_KERNEL);
10259                 if (!buf)
10260                         return -ENOMEM;
10261                 if (b_offset)
10262                         memcpy(buf, &start, 4);
10263                 if (odd_len)
10264                         memcpy(buf+len-4, &end, 4);
10265                 memcpy(buf + b_offset, data, eeprom->len);
10266         }
10267
10268         ret = tg3_nvram_write_block(tp, offset, len, buf);
10269
10270         if (buf != data)
10271                 kfree(buf);
10272
10273         return ret;
10274 }
10275
/* ethtool get_settings handler: report supported/advertised link modes,
 * the active speed/duplex, and the link partner's advertisement.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* PHY is managed by phylib; delegate entirely. */
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper ports support 10/100 + TP; SerDes ports are fibre only. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the configured flow control mode onto the standard
		 * Pause/Asym_Pause advertisement bit combinations.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		/* Link is up: report the negotiated results, including
		 * what the link partner advertised (tracked in rmt_adv).
		 */
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10341
/* ethtool set_settings handler: validate the requested link configuration
 * against hardware capabilities, store it, and reconfigure the PHY if the
 * interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* PHY is managed by phylib; delegate entirely. */
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode requires an explicit duplex. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisable modes for this hardware
		 * and reject anything outside it ...
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* ... then keep only the speed/duplex bits of what was
		 * actually requested.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Speed/duplex are negotiated, not forced. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* NOTE(review): orig_* presumably preserves the requested settings
	 * across power-state transitions -- confirm against the suspend path.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10432
10433 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10434 {
10435         struct tg3 *tp = netdev_priv(dev);
10436
10437         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10438         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10439         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10440         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10441 }
10442
10443 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10444 {
10445         struct tg3 *tp = netdev_priv(dev);
10446
10447         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10448                 wol->supported = WAKE_MAGIC;
10449         else
10450                 wol->supported = 0;
10451         wol->wolopts = 0;
10452         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10453                 wol->wolopts = WAKE_MAGIC;
10454         memset(&wol->sopass, 0, sizeof(wol->sopass));
10455 }
10456
10457 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10458 {
10459         struct tg3 *tp = netdev_priv(dev);
10460         struct device *dp = &tp->pdev->dev;
10461
10462         if (wol->wolopts & ~WAKE_MAGIC)
10463                 return -EINVAL;
10464         if ((wol->wolopts & WAKE_MAGIC) &&
10465             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10466                 return -EINVAL;
10467
10468         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10469
10470         spin_lock_bh(&tp->lock);
10471         if (device_may_wakeup(dp))
10472                 tg3_flag_set(tp, WOL_ENABLE);
10473         else
10474                 tg3_flag_clear(tp, WOL_ENABLE);
10475         spin_unlock_bh(&tp->lock);
10476
10477         return 0;
10478 }
10479
10480 static u32 tg3_get_msglevel(struct net_device *dev)
10481 {
10482         struct tg3 *tp = netdev_priv(dev);
10483         return tp->msg_enable;
10484 }
10485
10486 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10487 {
10488         struct tg3 *tp = netdev_priv(dev);
10489         tp->msg_enable = value;
10490 }
10491
/* ethtool nway_reset handler: restart link autonegotiation.
 * Fails if the interface is down or the port is a PHY-based SerDes.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* PHY is managed by phylib; let it restart aneg. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and only the second
		 * result is checked; presumably a dummy read to settle the
		 * PHY -- confirm before removing the first read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg, enabling it if the link came up
			 * via parallel detection.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10525
10526 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10527 {
10528         struct tg3 *tp = netdev_priv(dev);
10529
10530         ering->rx_max_pending = tp->rx_std_ring_mask;
10531         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10532                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10533         else
10534                 ering->rx_jumbo_max_pending = 0;
10535
10536         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10537
10538         ering->rx_pending = tp->rx_pending;
10539         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10540                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10541         else
10542                 ering->rx_jumbo_pending = 0;
10543
10544         ering->tx_pending = tp->napi[0].tx_pending;
10545 }
10546
/* ethtool set_ringparam handler: validate and apply new RX/TX ring sizes,
 * halting and restarting the hardware if the interface is up.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* The TX ring must hold at least one worst-case fragmented packet;
	 * chips with the TSO bug need extra headroom.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues get the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Restart the hardware so the new sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10592
10593 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10594 {
10595         struct tg3 *tp = netdev_priv(dev);
10596
10597         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10598
10599         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10600                 epause->rx_pause = 1;
10601         else
10602                 epause->rx_pause = 0;
10603
10604         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10605                 epause->tx_pause = 1;
10606         else
10607                 epause->tx_pause = 0;
10608 }
10609
/* ethtool set_pauseparam handler: update flow control configuration.
 * With phylib the change is pushed through the PHY's pause advertisement
 * and renegotiated; otherwise the MAC is halted and restarted with the
 * new settings.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings require Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx enables onto the standard Pause/Asym_Pause
		 * advertisement bit combinations.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached: stash the advertisement so it
			 * is applied when the PHY is connected.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Restart the hardware so the new flow control
			 * settings take effect.
			 */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10711
10712 static int tg3_get_sset_count(struct net_device *dev, int sset)
10713 {
10714         switch (sset) {
10715         case ETH_SS_TEST:
10716                 return TG3_NUM_TEST;
10717         case ETH_SS_STATS:
10718                 return TG3_NUM_STATS;
10719         default:
10720                 return -EOPNOTSUPP;
10721         }
10722 }
10723
10724 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10725 {
10726         switch (stringset) {
10727         case ETH_SS_STATS:
10728                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10729                 break;
10730         case ETH_SS_TEST:
10731                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10732                 break;
10733         default:
10734                 WARN_ON(1);     /* we need a WARN() */
10735                 break;
10736         }
10737 }
10738
/* ethtool set_phys_id handler: drive the port LEDs so the NIC can be
 * physically identified.  Returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to call back with ID_ON/ID_OFF once per second.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override the hardware and force the LEDs on/blinking. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override the hardware and force the LEDs off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved (hardware-driven) LED control. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
10773
10774 static void tg3_get_ethtool_stats(struct net_device *dev,
10775                                    struct ethtool_stats *estats, u64 *tmp_stats)
10776 {
10777         struct tg3 *tp = netdev_priv(dev);
10778
10779         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10780 }
10781
/* Locate and read the VPD data block.  Devices with our NVRAM magic may
 * carry an extended VPD area located via the NVRAM directory; otherwise
 * the fixed NVRAM VPD window is used, and foreign NVRAM is read through
 * the PCI VPD capability instead.
 *
 * Returns a kmalloc()ed buffer (caller must kfree()) with its length
 * stored in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is in 4-byte words; the
			 * location lives in the following directory word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD: fall back to the fixed VPD window. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* Foreign NVRAM: read via PCI config space, retrying up
		 * to three times on timeout or interruption.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10857
10858 #define NVRAM_TEST_SIZE 0x100
10859 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10860 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10861 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10862 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10863 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10864 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10865 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10866 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10867
10868 static int tg3_test_nvram(struct tg3 *tp)
10869 {
10870         u32 csum, magic, len;
10871         __be32 *buf;
10872         int i, j, k, err = 0, size;
10873
10874         if (tg3_flag(tp, NO_NVRAM))
10875                 return 0;
10876
10877         if (tg3_nvram_read(tp, 0, &magic) != 0)
10878                 return -EIO;
10879
10880         if (magic == TG3_EEPROM_MAGIC)
10881                 size = NVRAM_TEST_SIZE;
10882         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10883                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10884                     TG3_EEPROM_SB_FORMAT_1) {
10885                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10886                         case TG3_EEPROM_SB_REVISION_0:
10887                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10888                                 break;
10889                         case TG3_EEPROM_SB_REVISION_2:
10890                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10891                                 break;
10892                         case TG3_EEPROM_SB_REVISION_3:
10893                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10894                                 break;
10895                         case TG3_EEPROM_SB_REVISION_4:
10896                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10897                                 break;
10898                         case TG3_EEPROM_SB_REVISION_5:
10899                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10900                                 break;
10901                         case TG3_EEPROM_SB_REVISION_6:
10902                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10903                                 break;
10904                         default:
10905                                 return -EIO;
10906                         }
10907                 } else
10908                         return 0;
10909         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10910                 size = NVRAM_SELFBOOT_HW_SIZE;
10911         else
10912                 return -EIO;
10913
10914         buf = kmalloc(size, GFP_KERNEL);
10915         if (buf == NULL)
10916                 return -ENOMEM;
10917
10918         err = -EIO;
10919         for (i = 0, j = 0; i < size; i += 4, j++) {
10920                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10921                 if (err)
10922                         break;
10923         }
10924         if (i < size)
10925                 goto out;
10926
10927         /* Selfboot format */
10928         magic = be32_to_cpu(buf[0]);
10929         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10930             TG3_EEPROM_MAGIC_FW) {
10931                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10932
10933                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10934                     TG3_EEPROM_SB_REVISION_2) {
10935                         /* For rev 2, the csum doesn't include the MBA. */
10936                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10937                                 csum8 += buf8[i];
10938                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10939                                 csum8 += buf8[i];
10940                 } else {
10941                         for (i = 0; i < size; i++)
10942                                 csum8 += buf8[i];
10943                 }
10944
10945                 if (csum8 == 0) {
10946                         err = 0;
10947                         goto out;
10948                 }
10949
10950                 err = -EIO;
10951                 goto out;
10952         }
10953
10954         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10955             TG3_EEPROM_MAGIC_HW) {
10956                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10957                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10958                 u8 *buf8 = (u8 *) buf;
10959
10960                 /* Separate the parity bits and the data bytes.  */
10961                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10962                         if ((i == 0) || (i == 8)) {
10963                                 int l;
10964                                 u8 msk;
10965
10966                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10967                                         parity[k++] = buf8[i] & msk;
10968                                 i++;
10969                         } else if (i == 16) {
10970                                 int l;
10971                                 u8 msk;
10972
10973                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10974                                         parity[k++] = buf8[i] & msk;
10975                                 i++;
10976
10977                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10978                                         parity[k++] = buf8[i] & msk;
10979                                 i++;
10980                         }
10981                         data[j++] = buf8[i];
10982                 }
10983
10984                 err = -EIO;
10985                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10986                         u8 hw8 = hweight8(data[i]);
10987
10988                         if ((hw8 & 0x1) && parity[i])
10989                                 goto out;
10990                         else if (!(hw8 & 0x1) && !parity[i])
10991                                 goto out;
10992                 }
10993                 err = 0;
10994                 goto out;
10995         }
10996
10997         err = -EIO;
10998
10999         /* Bootstrap checksum at offset 0x10 */
11000         csum = calc_crc((unsigned char *) buf, 0x10);
11001         if (csum != le32_to_cpu(buf[0x10/4]))
11002                 goto out;
11003
11004         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11005         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11006         if (csum != le32_to_cpu(buf[0xfc/4]))
11007                 goto out;
11008
11009         kfree(buf);
11010
11011         buf = tg3_vpd_readblock(tp, &len);
11012         if (!buf)
11013                 return -ENOMEM;
11014
11015         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11016         if (i > 0) {
11017                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11018                 if (j < 0)
11019                         goto out;
11020
11021                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11022                         goto out;
11023
11024                 i += PCI_VPD_LRDT_TAG_SIZE;
11025                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11026                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11027                 if (j > 0) {
11028                         u8 csum8 = 0;
11029
11030                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11031
11032                         for (i = 0; i <= j; i++)
11033                                 csum8 += ((u8 *)buf)[i];
11034
11035                         if (csum8)
11036                                 goto out;
11037                 }
11038         }
11039
11040         err = 0;
11041
11042 out:
11043         kfree(buf);
11044         return err;
11045 }
11046
11047 #define TG3_SERDES_TIMEOUT_SEC  2
11048 #define TG3_COPPER_TIMEOUT_SEC  6
11049
11050 static int tg3_test_link(struct tg3 *tp)
11051 {
11052         int i, max;
11053
11054         if (!netif_running(tp->dev))
11055                 return -ENODEV;
11056
11057         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11058                 max = TG3_SERDES_TIMEOUT_SEC;
11059         else
11060                 max = TG3_COPPER_TIMEOUT_SEC;
11061
11062         for (i = 0; i < max; i++) {
11063                 if (netif_carrier_ok(tp->dev))
11064                         return 0;
11065
11066                 if (msleep_interruptible(1000))
11067                         break;
11068         }
11069
11070         return -EIO;
11071 }
11072
/* Only test the commonly used registers.
 *
 * For every entry in reg_tbl that applies to this ASIC generation,
 * verify that (a) writing zero leaves the read-only bits (read_mask)
 * untouched and clears the read/write bits (write_mask), and (b)
 * writing all ones to both masks leaves the read-only bits untouched
 * and sets all read/write bits.  The original register contents are
 * restored before moving on.  Returns 0 on success, -EIO on the first
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry applicability flags: which ASIC families the entry is valid for. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so table entries can be filtered below. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Put the original value back before the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* Failure path: log the offending offset and restore the register. */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
11293
11294 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11295 {
11296         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11297         int i;
11298         u32 j;
11299
11300         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11301                 for (j = 0; j < len; j += 4) {
11302                         u32 val;
11303
11304                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11305                         tg3_read_mem(tp, offset + j, &val);
11306                         if (val != test_pattern[i])
11307                                 return -EIO;
11308                 }
11309         }
11310         return 0;
11311 }
11312
/* Run tg3_do_mem_test() over every internal memory region of this
 * ASIC.  The region table is chosen by chip generation; each table is
 * terminated by an offset of 0xffffffff.  Returns 0 on success or the
 * first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	/* Per-generation { offset, length } region tables. */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the table for the most specific matching generation;
	 * the order of these tests matters (newest families first).
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	/* Test each region, stopping at the first failure. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11381
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned Ethertype + IPv4 + TCP header used to build the TSO loopback
 * test frame.  The IP tot_len field (bytes 4-5 of the IP header) is
 * left zero and filled in by tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,				/* Ethertype: 0x0800 (IPv4) */
0x45, 0x00, 0x00, 0x00,			/* IP: ver 4, IHL 5, TOS, tot_len (patched later) */
0x00, 0x00, 0x40, 0x00,			/* IP: id 0, flags DF, frag offset 0 */
0x40, 0x06, 0x00, 0x00,			/* IP: TTL 64, proto 6 (TCP), csum 0 */
0x0a, 0x00, 0x00, 0x01,			/* IP: saddr 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,			/* IP: daddr 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,			/* TCP: sport 0x0d00, dport 0xe000 */
0x00, 0x00, 0x01, 0x00,			/* TCP: seq */
0x00, 0x00, 0x02, 0x00,			/* TCP: ack_seq */
0x80, 0x10, 0x10, 0x00,			/* TCP: doff 8 (32 bytes), flags ACK, window */
0x14, 0x09, 0x00, 0x00,			/* TCP: csum, urg ptr */
0x01, 0x01, 0x08, 0x0a,			/* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp value (filler) */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp echo (filler) */
};
11404
/* Send one test frame of @pktsz bytes through the device while it is
 * in (MAC or PHY) loopback mode, then poll the receive return ring and
 * verify that the payload came back unmodified.  When @tso_loopback is
 * set, a canned TCP segment is built from tg3_tso_header and the
 * hardware is asked to segment it into TG3_TSO_MSS-sized packets.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO otherwise.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Pick the tx/rx napi contexts: with RSS/TSS enabled the test
	 * traffic flows through napi[1] for the respective direction.
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address, source left zeroed. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Lay the canned IP+TCP header after the MAC addresses. */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Payload bytes after the canned header determine how
		 * many segments the hardware should produce.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* Newer TSO engines want a zero TCP checksum seed;
		 * older ones compute it from the pseudo-header flag.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * format each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable byte pattern to verify
	 * against on receive.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	/* Force a coalescing pass so rx_start_idx below is current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Kick the hardware and flush the mailbox write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if the frame was not fully transmitted ... */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	/* ... or if the expected number of packets did not arrive. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk each received descriptor and verify its payload. */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Verify the packet landed on the expected ring. */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Compare the payload against the pattern we sent;
		 * val carries the running offset across TSO segments.
		 */
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
11605
/* Per-test failure bits reported back through the ethtool data[] slots. */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

/* Run the loopback self-tests: MAC loopback into data[0], internal PHY
 * loopback into data[1], and (when @do_extlpbk) external loopback into
 * data[2].  Each slot accumulates TG3_*_LOOPBACK_FAILED bits.  Returns
 * 0 when every attempted test passed, -EIO otherwise.  EEE is disabled
 * for the duration of the test and restored on exit.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Temporarily mask EEE capability so it cannot interfere. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback: only for non-serdes PHYs driven by this driver. */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Caller zeroes data[] beforehand, so any set bit means failure. */
	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	/* Restore the EEE capability bit saved on entry. */
	tp->phy_flags |= eee_cap;

	return err;
}
11720
/* Ethtool self-test entry point.  Runs the NVRAM, link, register,
 * memory, loopback and interrupt tests as requested by etest->flags
 * and records per-test pass/fail results in @data (nonzero = failed).
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A device in low-power state must be powered up first; if that
	 * fails, mark every test slot as failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* Skip the link test when external loopback was requested; the
	 * port may be cabled back to itself.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip before the intrusive register and
		 * memory tests.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* Loopback results land in data[4..6]. */
		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and restore normal operation after the offline
		 * tests.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11808
/* net_device ioctl handler for MII register access.
 * SIOCGMIIPHY reports the PHY address and falls through to also read a
 * register; SIOCGMIIREG/SIOCSMIIREG read/write a PHY register under
 * tp->lock.  When phylib manages the PHY, the request is delegated to
 * phy_mii_ioctl() instead.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* PHY not attached yet; caller should retry. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize MDIO access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
11865
11866 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11867 {
11868         struct tg3 *tp = netdev_priv(dev);
11869
11870         memcpy(ec, &tp->coal, sizeof(*ec));
11871         return 0;
11872 }
11873
11874 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11875 {
11876         struct tg3 *tp = netdev_priv(dev);
11877         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11878         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11879
11880         if (!tg3_flag(tp, 5705_PLUS)) {
11881                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11882                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11883                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11884                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11885         }
11886
11887         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11888             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11889             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11890             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11891             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11892             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11893             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11894             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11895             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11896             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11897                 return -EINVAL;
11898
11899         /* No rx interrupts will be generated if both are zero */
11900         if ((ec->rx_coalesce_usecs == 0) &&
11901             (ec->rx_max_coalesced_frames == 0))
11902                 return -EINVAL;
11903
11904         /* No tx interrupts will be generated if both are zero */
11905         if ((ec->tx_coalesce_usecs == 0) &&
11906             (ec->tx_max_coalesced_frames == 0))
11907                 return -EINVAL;
11908
11909         /* Only copy relevant parameters, ignore all others. */
11910         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11911         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11912         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11913         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11914         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11915         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11916         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11917         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11918         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11919
11920         if (netif_running(dev)) {
11921                 tg3_full_lock(tp, 0);
11922                 __tg3_set_coalesce(tp, &tp->coal);
11923                 tg3_full_unlock(tp);
11924         }
11925         return 0;
11926 }
11927
/* ethtool operations table; wired up to the net_device at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
11955
11956 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11957 {
11958         u32 cursize, val, magic;
11959
11960         tp->nvram_size = EEPROM_CHIP_SIZE;
11961
11962         if (tg3_nvram_read(tp, 0, &magic) != 0)
11963                 return;
11964
11965         if ((magic != TG3_EEPROM_MAGIC) &&
11966             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11967             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11968                 return;
11969
11970         /*
11971          * Size the chip by reading offsets at increasing powers of two.
11972          * When we encounter our validation signature, we know the addressing
11973          * has wrapped around, and thus have our chip size.
11974          */
11975         cursize = 0x10;
11976
11977         while (cursize < tp->nvram_size) {
11978                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11979                         return;
11980
11981                 if (val == magic)
11982                         break;
11983
11984                 cursize <<= 1;
11985         }
11986
11987         tp->nvram_size = cursize;
11988 }
11989
11990 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11991 {
11992         u32 val;
11993
11994         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11995                 return;
11996
11997         /* Selfboot format */
11998         if (val != TG3_EEPROM_MAGIC) {
11999                 tg3_get_eeprom_size(tp);
12000                 return;
12001         }
12002
12003         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12004                 if (val != 0) {
12005                         /* This is confusing.  We want to operate on the
12006                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12007                          * call will read from NVRAM and byteswap the data
12008                          * according to the byteswapping settings for all
12009                          * other register accesses.  This ensures the data we
12010                          * want will always reside in the lower 16-bits.
12011                          * However, the data in NVRAM is in LE format, which
12012                          * means the data from the NVRAM read will always be
12013                          * opposite the endianness of the CPU.  The 16-bit
12014                          * byteswap then brings the data to CPU endianness.
12015                          */
12016                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12017                         return;
12018                 }
12019         }
12020         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12021 }
12022
/* Decode NVRAM_CFG1 for pre-5752 devices.  5750/5780-class chips carry
 * a vendor field that selects the JEDEC vendor, page size and
 * buffering; everything else defaults to a buffered Atmel part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* EEPROM part: clear the compatibility-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Non-5750/5780: assume a buffered Atmel part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12073
12074 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12075 {
12076         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12077         case FLASH_5752PAGE_SIZE_256:
12078                 tp->nvram_pagesize = 256;
12079                 break;
12080         case FLASH_5752PAGE_SIZE_512:
12081                 tp->nvram_pagesize = 512;
12082                 break;
12083         case FLASH_5752PAGE_SIZE_1K:
12084                 tp->nvram_pagesize = 1024;
12085                 break;
12086         case FLASH_5752PAGE_SIZE_2K:
12087                 tp->nvram_pagesize = 2048;
12088                 break;
12089         case FLASH_5752PAGE_SIZE_4K:
12090                 tp->nvram_pagesize = 4096;
12091                 break;
12092         case FLASH_5752PAGE_SIZE_264:
12093                 tp->nvram_pagesize = 264;
12094                 break;
12095         case FLASH_5752PAGE_SIZE_528:
12096                 tp->nvram_pagesize = 528;
12097                 break;
12098         }
12099 }
12100
/* Decode NVRAM_CFG1 for 5752 devices: note TPM protection, select the
 * JEDEC vendor and buffered/flash flags, then derive the page size
 * (flash parts) or use the maximum EEPROM size as the page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compat-bypass bit for EEPROM parts, as in
		 * tg3_get_nvram_info().
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12141
/* Decode NVRAM_CFG1 for 5755 devices.  Beyond vendor/pagesize, the
 * NVRAM size depends on the specific part and on whether TPM
 * protection is active (protected parts expose a smaller window).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size varies per part; protected parts report the
		 * reduced accessible size instead of the full chip.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12197
/* Decode NVRAM_CFG1 for 5787 devices: select JEDEC vendor, page size
 * and buffered/flash flags based on the vendor field.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear the compat-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12235
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM protection is active
 * the accessible size is read from the NVRAM_ADDR_LOCKOUT register;
 * otherwise the size is inferred from the exact part number.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here skip the address translation step. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: the hardware reports the lockout boundary. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12310
/* 5906 NVRAM setup: no CFG1 vendor decoding; unconditionally treated
 * as a buffered Atmel AT24C512-class EEPROM.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
12317
/* Decode NVRAM_CFG1 for 57780 devices: vendor, size, page size and
 * flags.  Unknown vendor encodings mark the device as having no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compat-bypass and we are done
		 * (no page-size decode needed).
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size depends on the specific Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12389
12390
/* Decode NVRAM_CFG1 for 5717 devices: vendor, size, page size and
 * flags.  Some parts defer size detection to tg3_nvram_get_size();
 * unknown vendor encodings mark the device as having no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: clear compat-bypass and we are done. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12468
/* Identify the NVRAM/EEPROM device strapped to a 5720-class chip.
 *
 * Decodes the vendor/part strapping bits in NVRAM_CFG1 to set the JEDEC
 * manufacturer, the NVRAM_BUFFERED/FLASH flags, the page size and, where
 * the strapping encodes it, the total NVRAM size.  Unrecognized straps
 * mark the device as having no NVRAM at all.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Atmel AT24-style serial EEPROM rather than flash;
		 * page size doubles as the chip size, and there is no
		 * sizing switch below, so return directly.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel flash parts. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is implied by the part number where known. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST (M25PE/M45PE series) flash parts. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages look like Atmel DataFlash geometry, which uses
	 * page-based address translation; everything else is plain linear
	 * addressing -- TODO confirm against tg3_nvram_phys_addr().
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12580
12581 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12582 static void __devinit tg3_nvram_init(struct tg3 *tp)
12583 {
12584         tw32_f(GRC_EEPROM_ADDR,
12585              (EEPROM_ADDR_FSM_RESET |
12586               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12587                EEPROM_ADDR_CLKPERD_SHIFT)));
12588
12589         msleep(1);
12590
12591         /* Enable seeprom accesses. */
12592         tw32_f(GRC_LOCAL_CTRL,
12593              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12594         udelay(100);
12595
12596         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12597             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12598                 tg3_flag_set(tp, NVRAM);
12599
12600                 if (tg3_nvram_lock(tp)) {
12601                         netdev_warn(tp->dev,
12602                                     "Cannot get nvram lock, %s failed\n",
12603                                     __func__);
12604                         return;
12605                 }
12606                 tg3_enable_nvram_access(tp);
12607
12608                 tp->nvram_size = 0;
12609
12610                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12611                         tg3_get_5752_nvram_info(tp);
12612                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12613                         tg3_get_5755_nvram_info(tp);
12614                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12615                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12616                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12617                         tg3_get_5787_nvram_info(tp);
12618                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12619                         tg3_get_5761_nvram_info(tp);
12620                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12621                         tg3_get_5906_nvram_info(tp);
12622                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12623                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12624                         tg3_get_57780_nvram_info(tp);
12625                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12626                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12627                         tg3_get_5717_nvram_info(tp);
12628                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12629                         tg3_get_5720_nvram_info(tp);
12630                 else
12631                         tg3_get_nvram_info(tp);
12632
12633                 if (tp->nvram_size == 0)
12634                         tg3_get_nvram_size(tp);
12635
12636                 tg3_disable_nvram_access(tp);
12637                 tg3_nvram_unlock(tp);
12638
12639         } else {
12640                 tg3_flag_clear(tp, NVRAM);
12641                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12642
12643                 tg3_get_eeprom_size(tp);
12644         }
12645 }
12646
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one 32-bit word at a time through the GRC EEPROM mailbox
 * registers.  offset and len are dword aligned.  Returns 0 on success
 * or -EBUSY if a word write fails to complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale completion before kicking off this write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion: up to 1000 x 1 ms. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
12695
/* offset and length are dword aligned */
/* Write to unbuffered (externally erased) flash via read-modify-write of
 * whole pages: read the target page into a scratch buffer, merge in the
 * caller's data, erase the page, then rewrite it word by word.  Returns
 * 0 on success or a negative errno.
 *
 * NOTE(review): @buf is never advanced inside the while loop, so a write
 * spanning more than one flash page would copy the same leading bytes of
 * @buf into every page -- verify callers never cross a page boundary.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Page-aligned base of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page contents. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page image back, one word per command,
		 * flagging the first and last words of the page.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write disable regardless of success or failure. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
12793
/* offset and length are dword aligned */
/* Write @len bytes from @buf at @offset to buffered flash or EEPROM via
 * the NVRAM command interface, one 32-bit word per command.  Returns 0
 * on success or the first error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset of this word within its NVRAM page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first/last word of each page and of the
		 * overall transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on pre-5755, non-5752 chips need an explicit
		 * write-enable command at the start of each page.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
12844
12845 /* offset and length are dword aligned */
12846 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12847 {
12848         int ret;
12849
12850         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12851                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12852                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12853                 udelay(40);
12854         }
12855
12856         if (!tg3_flag(tp, NVRAM)) {
12857                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12858         } else {
12859                 u32 grc_mode;
12860
12861                 ret = tg3_nvram_lock(tp);
12862                 if (ret)
12863                         return ret;
12864
12865                 tg3_enable_nvram_access(tp);
12866                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12867                         tw32(NVRAM_WRITE1, 0x406);
12868
12869                 grc_mode = tr32(GRC_MODE);
12870                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12871
12872                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12873                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12874                                 buf);
12875                 } else {
12876                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12877                                 buf);
12878                 }
12879
12880                 grc_mode = tr32(GRC_MODE);
12881                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12882
12883                 tg3_disable_nvram_access(tp);
12884                 tg3_nvram_unlock(tp);
12885         }
12886
12887         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12888                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12889                 udelay(40);
12890         }
12891
12892         return ret;
12893 }
12894
/* One entry of the subsystem-ID to PHY-ID board table below. */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem vendor/device IDs */
	u32 phy_id;	/* TG3_PHY_ID_* value; 0 = none recorded for board */
};
12899
/* Known boards, grouped by vendor; searched by tg3_lookup_by_subsys().
 * Entries with a phy_id of 0 have no PHY ID recorded -- presumably
 * fiber/serdes variants; confirm against the board documentation.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12963
12964 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12965 {
12966         int i;
12967
12968         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12969                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12970                      tp->pdev->subsystem_vendor) &&
12971                     (subsys_id_to_phy_id[i].subsys_devid ==
12972                      tp->pdev->subsystem_device))
12973                         return &subsys_id_to_phy_id[i];
12974         }
12975         return NULL;
12976 }
12977
/* Read the persistent hardware configuration the bootcode left for the
 * driver: on 5906 from the VCPU config shadow register, otherwise from
 * the NIC SRAM config area (used only if the signature magic matches).
 * Derives tp->phy_id, the LED control mode, and the WOL/ASF/APE/RGMII
 * feature flags, then syncs the PM core's wakeup state with the
 * discovered WOL settings.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: config lives in the VCPU shadow, not NIC SRAM. */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only valid for certain chips/bootcode versions. */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from its two packed SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima boards 0x205a/0x2063 misreport WP. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards can only wake if the bootcode says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Mirror the discovered WOL state into the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13177
13178 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13179 {
13180         int i;
13181         u32 val;
13182
13183         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13184         tw32(OTP_CTRL, cmd);
13185
13186         /* Wait for up to 1 ms for command to execute. */
13187         for (i = 0; i < 100; i++) {
13188                 val = tr32(OTP_STATUS);
13189                 if (val & OTP_STATUS_CMD_DONE)
13190                         break;
13191                 udelay(10);
13192         }
13193
13194         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13195 }
13196
13197 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13198  * configuration is a 32-bit value that straddles the alignment boundary.
13199  * We do two 32-bit reads and then shift and merge the results.
13200  */
13201 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13202 {
13203         u32 bhalf_otp, thalf_otp;
13204
13205         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13206
13207         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13208                 return 0;
13209
13210         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13211
13212         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13213                 return 0;
13214
13215         thalf_otp = tr32(OTP_READ_DATA);
13216
13217         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13218
13219         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13220                 return 0;
13221
13222         bhalf_otp = tr32(OTP_READ_DATA);
13223
13224         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13225 }
13226
13227 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13228 {
13229         u32 adv = ADVERTISED_Autoneg;
13230
13231         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13232                 adv |= ADVERTISED_1000baseT_Half |
13233                        ADVERTISED_1000baseT_Full;
13234
13235         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13236                 adv |= ADVERTISED_100baseT_Half |
13237                        ADVERTISED_100baseT_Full |
13238                        ADVERTISED_10baseT_Half |
13239                        ADVERTISED_10baseT_Full |
13240                        ADVERTISED_TP;
13241         else
13242                 adv |= ADVERTISED_FIBRE;
13243
13244         tp->link_config.advertising = adv;
13245         tp->link_config.speed = SPEED_INVALID;
13246         tp->link_config.duplex = DUPLEX_INVALID;
13247         tp->link_config.autoneg = AUTONEG_ENABLE;
13248         tp->link_config.active_speed = SPEED_INVALID;
13249         tp->link_config.active_duplex = DUPLEX_INVALID;
13250         tp->link_config.orig_speed = SPEED_INVALID;
13251         tp->link_config.orig_duplex = DUPLEX_INVALID;
13252         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13253 }
13254
/* Identify the PHY attached to the MAC.  Reads the MII PHY ID registers
 * (unless ASF/APE firmware owns the PHY), falling back to the ID recorded
 * by tg3_get_eeprom_hw_cfg() or to the hardcoded subsystem-ID table, then
 * performs an initial reset and autoneg setup on copper PHYs.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, hand off entirely. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's internal ID format. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* The BCM8002 is the one known serdes PHY in this table. */
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry also implies a serdes PHY. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark the chips/revisions that are Energy Efficient Ethernet
	 * capable (copper only; early A0 steppings excluded).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR's link-status bit is latched-low, so read it
		 * twice: the second read reflects the current state.
		 * If link is already up, skip the disruptive reset.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the hardware's advertised modes don't match the
		 * desired configuration, reprogram them and restart
		 * autonegotiation.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here — looks like a settle/retry; confirm
		 * before collapsing into a single call.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13365
/* Fill tp->board_part_number (and possibly tp->fw_ver) from the PCI
 * Vital Product Data block.  If VPD is absent or carries no part
 * number, fall back to a name synthesized from the PCI device ID.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it against
	 * the buffer we actually read.
	 */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Boards with manufacturer ID "1028" (presumably the Dell
	 * vendor id — confirm) store a firmware version string in the
	 * V0 vendor-specific keyword; copy it into fw_ver.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Assumes tp->fw_ver starts zeroed so the copied bytes
		 * end up NUL-terminated for strncat — TODO confirm.
		 */
		memcpy(tp->fw_ver, &vpd_data[j], len);
		/* NOTE(review): this strncat bound is derived from the
		 * VPD buffer length, not from the space remaining in
		 * fw_ver — looks suspect; verify against TG3_VER_SIZE.
		 */
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	/* Extract the board part number proper. */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No usable VPD: pick a canned name from the PCI device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
13474
13475 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13476 {
13477         u32 val;
13478
13479         if (tg3_nvram_read(tp, offset, &val) ||
13480             (val & 0xfc000000) != 0x0c000000 ||
13481             tg3_nvram_read(tp, offset + 4, &val) ||
13482             val != 0)
13483                 return 0;
13484
13485         return 1;
13486 }
13487
/* Extract the bootcode version from NVRAM and append it to tp->fw_ver.
 * Two layouts exist: newer images carry a 16-byte version string whose
 * location is recorded in the image header; older images hold a packed
 * major/minor word at TG3_NVM_PTREV_BCVER.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* NVRAM word 0xc gives the bootcode image offset and word 0x4
	 * its base address; both are needed to locate the version data
	 * (presumed from their use in the address math below).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Same signature check as tg3_fw_img_is_valid(): a 0x0c000000
	 * header word followed by a zero word marks the newer layout.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* The version string is exactly 16 bytes; bail if it
		 * would not fit or the pointer word cannot be read.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy layout: packed version word at a fixed offset. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13539
13540 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13541 {
13542         u32 val, major, minor;
13543
13544         /* Use native endian representation */
13545         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13546                 return;
13547
13548         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13549                 TG3_NVM_HWSB_CFG1_MAJSFT;
13550         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13551                 TG3_NVM_HWSB_CFG1_MINSFT;
13552
13553         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13554 }
13555
/* Append the selfboot (NVRAM format 1) firmware version to tp->fw_ver
 * as "sb vMAJ.MIN" plus an optional build suffix letter 'a'..'z'.
 * @val is NVRAM word 0 (the magic/format word already read by the
 * caller).
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format-1 images carry a readable version. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition/revision word lives at a revision-specific offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two-digit minor; builds map to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds 1..26 become a trailing letter.  NOTE(review): this
	 * overwrites the NUL written by snprintf and relies on the byte
	 * after it already being NUL (zeroed struct / later terminator
	 * in tg3_read_fw_ver()) — confirm.
	 */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13610
13611 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13612 {
13613         u32 val, offset, start;
13614         int i, vlen;
13615
13616         for (offset = TG3_NVM_DIR_START;
13617              offset < TG3_NVM_DIR_END;
13618              offset += TG3_NVM_DIRENT_SIZE) {
13619                 if (tg3_nvram_read(tp, offset, &val))
13620                         return;
13621
13622                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13623                         break;
13624         }
13625
13626         if (offset == TG3_NVM_DIR_END)
13627                 return;
13628
13629         if (!tg3_flag(tp, 5705_PLUS))
13630                 start = 0x08000000;
13631         else if (tg3_nvram_read(tp, offset - 4, &start))
13632                 return;
13633
13634         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13635             !tg3_fw_img_is_valid(tp, offset) ||
13636             tg3_nvram_read(tp, offset + 8, &val))
13637                 return;
13638
13639         offset += val - start;
13640
13641         vlen = strlen(tp->fw_ver);
13642
13643         tp->fw_ver[vlen++] = ',';
13644         tp->fw_ver[vlen++] = ' ';
13645
13646         for (i = 0; i < 4; i++) {
13647                 __be32 v;
13648                 if (tg3_nvram_read_be32(tp, offset, &v))
13649                         return;
13650
13651                 offset += sizeof(v);
13652
13653                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13654                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13655                         break;
13656                 }
13657
13658                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13659                 vlen += sizeof(v);
13660         }
13661 }
13662
/* Append the out-of-band management (DASH or NCSI) firmware version
 * reported by the APE to tp->fw_ver.  No-op unless both APE and ASF
 * are enabled and the APE firmware signals readiness.
 */
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* The shared-memory segment signature proves APE firmware is
	 * present; the READY bit proves its registers are valid.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	/* Distinguish NCSI firmware from DASH and remember the fact. */
	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	/* Bounded append of " <type> vMAJ.MIN.REV.BLD". */
	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
13698
13699 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13700 {
13701         u32 val;
13702         bool vpd_vers = false;
13703
13704         if (tp->fw_ver[0] != 0)
13705                 vpd_vers = true;
13706
13707         if (tg3_flag(tp, NO_NVRAM)) {
13708                 strcat(tp->fw_ver, "sb");
13709                 return;
13710         }
13711
13712         if (tg3_nvram_read(tp, 0, &val))
13713                 return;
13714
13715         if (val == TG3_EEPROM_MAGIC)
13716                 tg3_read_bc_ver(tp);
13717         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13718                 tg3_read_sb_ver(tp, val);
13719         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13720                 tg3_read_hwsb_ver(tp);
13721         else
13722                 return;
13723
13724         if (vpd_vers)
13725                 goto done;
13726
13727         if (tg3_flag(tp, ENABLE_APE)) {
13728                 if (tg3_flag(tp, ENABLE_ASF))
13729                         tg3_read_dash_ver(tp);
13730         } else if (tg3_flag(tp, ENABLE_ASF)) {
13731                 tg3_read_mgmtfw_ver(tp);
13732         }
13733
13734 done:
13735         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13736 }
13737
13738 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13739
13740 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13741 {
13742         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13743                 return TG3_RX_RET_MAX_SIZE_5717;
13744         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13745                 return TG3_RX_RET_MAX_SIZE_5700;
13746         else
13747                 return TG3_RX_RET_MAX_SIZE_5705;
13748 }
13749
/* Host bridges on which posted PCI writes may be reordered.
 * NOTE(review): consumed elsewhere (outside this chunk, presumably in
 * tg3_get_invariants()) to enable a write-reorder workaround — confirm.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13756
13757 static int __devinit tg3_get_invariants(struct tg3 *tp)
13758 {
13759         u32 misc_ctrl_reg;
13760         u32 pci_state_reg, grc_misc_cfg;
13761         u32 val;
13762         u16 pci_cmd;
13763         int err;
13764
13765         /* Force memory write invalidate off.  If we leave it on,
13766          * then on 5700_BX chips we have to enable a workaround.
13767          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13768          * to match the cacheline size.  The Broadcom driver have this
13769          * workaround but turns MWI off all the times so never uses
13770          * it.  This seems to suggest that the workaround is insufficient.
13771          */
13772         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13773         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13774         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13775
13776         /* Important! -- Make sure register accesses are byteswapped
13777          * correctly.  Also, for those chips that require it, make
13778          * sure that indirect register accesses are enabled before
13779          * the first operation.
13780          */
13781         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13782                               &misc_ctrl_reg);
13783         tp->misc_host_ctrl |= (misc_ctrl_reg &
13784                                MISC_HOST_CTRL_CHIPREV);
13785         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13786                                tp->misc_host_ctrl);
13787
13788         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13789                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13791                 u32 prod_id_asic_rev;
13792
13793                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13794                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13795                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13796                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13797                         pci_read_config_dword(tp->pdev,
13798                                               TG3PCI_GEN2_PRODID_ASICREV,
13799                                               &prod_id_asic_rev);
13800                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13801                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13802                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13803                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13804                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13805                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13806                         pci_read_config_dword(tp->pdev,
13807                                               TG3PCI_GEN15_PRODID_ASICREV,
13808                                               &prod_id_asic_rev);
13809                 else
13810                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13811                                               &prod_id_asic_rev);
13812
13813                 tp->pci_chip_rev_id = prod_id_asic_rev;
13814         }
13815
13816         /* Wrong chip ID in 5752 A0. This code can be removed later
13817          * as A0 is not in production.
13818          */
13819         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13820                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13821
13822         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13823          * we need to disable memory and use config. cycles
13824          * only to access all registers. The 5702/03 chips
13825          * can mistakenly decode the special cycles from the
13826          * ICH chipsets as memory write cycles, causing corruption
13827          * of register and memory space. Only certain ICH bridges
13828          * will drive special cycles with non-zero data during the
13829          * address phase which can fall within the 5703's address
13830          * range. This is not an ICH bug as the PCI spec allows
13831          * non-zero address during special cycles. However, only
13832          * these ICH bridges are known to drive non-zero addresses
13833          * during special cycles.
13834          *
13835          * Since special cycles do not cross PCI bridges, we only
13836          * enable this workaround if the 5703 is on the secondary
13837          * bus of these ICH bridges.
13838          */
13839         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13840             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13841                 static struct tg3_dev_id {
13842                         u32     vendor;
13843                         u32     device;
13844                         u32     rev;
13845                 } ich_chipsets[] = {
13846                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13847                           PCI_ANY_ID },
13848                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13849                           PCI_ANY_ID },
13850                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13851                           0xa },
13852                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13853                           PCI_ANY_ID },
13854                         { },
13855                 };
13856                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13857                 struct pci_dev *bridge = NULL;
13858
13859                 while (pci_id->vendor != 0) {
13860                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13861                                                 bridge);
13862                         if (!bridge) {
13863                                 pci_id++;
13864                                 continue;
13865                         }
13866                         if (pci_id->rev != PCI_ANY_ID) {
13867                                 if (bridge->revision > pci_id->rev)
13868                                         continue;
13869                         }
13870                         if (bridge->subordinate &&
13871                             (bridge->subordinate->number ==
13872                              tp->pdev->bus->number)) {
13873                                 tg3_flag_set(tp, ICH_WORKAROUND);
13874                                 pci_dev_put(bridge);
13875                                 break;
13876                         }
13877                 }
13878         }
13879
13880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13881                 static struct tg3_dev_id {
13882                         u32     vendor;
13883                         u32     device;
13884                 } bridge_chipsets[] = {
13885                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13886                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13887                         { },
13888                 };
13889                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13890                 struct pci_dev *bridge = NULL;
13891
13892                 while (pci_id->vendor != 0) {
13893                         bridge = pci_get_device(pci_id->vendor,
13894                                                 pci_id->device,
13895                                                 bridge);
13896                         if (!bridge) {
13897                                 pci_id++;
13898                                 continue;
13899                         }
13900                         if (bridge->subordinate &&
13901                             (bridge->subordinate->number <=
13902                              tp->pdev->bus->number) &&
13903                             (bridge->subordinate->subordinate >=
13904                              tp->pdev->bus->number)) {
13905                                 tg3_flag_set(tp, 5701_DMA_BUG);
13906                                 pci_dev_put(bridge);
13907                                 break;
13908                         }
13909                 }
13910         }
13911
13912         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13913          * DMA addresses > 40-bit. This bridge may have other additional
13914          * 57xx devices behind it in some 4-port NIC designs for example.
13915          * Any tg3 device found behind the bridge will also need the 40-bit
13916          * DMA workaround.
13917          */
13918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13919             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13920                 tg3_flag_set(tp, 5780_CLASS);
13921                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13922                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13923         } else {
13924                 struct pci_dev *bridge = NULL;
13925
13926                 do {
13927                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13928                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13929                                                 bridge);
13930                         if (bridge && bridge->subordinate &&
13931                             (bridge->subordinate->number <=
13932                              tp->pdev->bus->number) &&
13933                             (bridge->subordinate->subordinate >=
13934                              tp->pdev->bus->number)) {
13935                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13936                                 pci_dev_put(bridge);
13937                                 break;
13938                         }
13939                 } while (bridge);
13940         }
13941
13942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13943             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13944                 tp->pdev_peer = tg3_find_peer(tp);
13945
13946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13949                 tg3_flag_set(tp, 5717_PLUS);
13950
13951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13952             tg3_flag(tp, 5717_PLUS))
13953                 tg3_flag_set(tp, 57765_PLUS);
13954
13955         /* Intentionally exclude ASIC_REV_5906 */
13956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13961             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13962             tg3_flag(tp, 57765_PLUS))
13963                 tg3_flag_set(tp, 5755_PLUS);
13964
13965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13967             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13968             tg3_flag(tp, 5755_PLUS) ||
13969             tg3_flag(tp, 5780_CLASS))
13970                 tg3_flag_set(tp, 5750_PLUS);
13971
13972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13973             tg3_flag(tp, 5750_PLUS))
13974                 tg3_flag_set(tp, 5705_PLUS);
13975
13976         /* Determine TSO capabilities */
13977         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13978                 ; /* Do nothing. HW bug. */
13979         else if (tg3_flag(tp, 57765_PLUS))
13980                 tg3_flag_set(tp, HW_TSO_3);
13981         else if (tg3_flag(tp, 5755_PLUS) ||
13982                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13983                 tg3_flag_set(tp, HW_TSO_2);
13984         else if (tg3_flag(tp, 5750_PLUS)) {
13985                 tg3_flag_set(tp, HW_TSO_1);
13986                 tg3_flag_set(tp, TSO_BUG);
13987                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13988                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13989                         tg3_flag_clear(tp, TSO_BUG);
13990         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13991                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13992                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13993                         tg3_flag_set(tp, TSO_BUG);
13994                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13995                         tp->fw_needed = FIRMWARE_TG3TSO5;
13996                 else
13997                         tp->fw_needed = FIRMWARE_TG3TSO;
13998         }
13999
14000         /* Selectively allow TSO based on operating conditions */
14001         if (tg3_flag(tp, HW_TSO_1) ||
14002             tg3_flag(tp, HW_TSO_2) ||
14003             tg3_flag(tp, HW_TSO_3) ||
14004             tp->fw_needed) {
14005                 /* For firmware TSO, assume ASF is disabled.
14006                  * We'll disable TSO later if we discover ASF
14007                  * is enabled in tg3_get_eeprom_hw_cfg().
14008                  */
14009                 tg3_flag_set(tp, TSO_CAPABLE);
14010         } else {
14011                 tg3_flag_clear(tp, TSO_CAPABLE);
14012                 tg3_flag_clear(tp, TSO_BUG);
14013                 tp->fw_needed = NULL;
14014         }
14015
14016         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14017                 tp->fw_needed = FIRMWARE_TG3;
14018
14019         tp->irq_max = 1;
14020
14021         if (tg3_flag(tp, 5750_PLUS)) {
14022                 tg3_flag_set(tp, SUPPORT_MSI);
14023                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14024                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14025                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14026                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14027                      tp->pdev_peer == tp->pdev))
14028                         tg3_flag_clear(tp, SUPPORT_MSI);
14029
14030                 if (tg3_flag(tp, 5755_PLUS) ||
14031                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14032                         tg3_flag_set(tp, 1SHOT_MSI);
14033                 }
14034
14035                 if (tg3_flag(tp, 57765_PLUS)) {
14036                         tg3_flag_set(tp, SUPPORT_MSIX);
14037                         tp->irq_max = TG3_IRQ_MAX_VECS;
14038                 }
14039         }
14040
14041         if (tg3_flag(tp, 5755_PLUS))
14042                 tg3_flag_set(tp, SHORT_DMA_BUG);
14043
14044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14045                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14046
14047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14048             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14049             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14050                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14051
14052         if (tg3_flag(tp, 57765_PLUS) &&
14053             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14054                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14055
14056         if (!tg3_flag(tp, 5705_PLUS) ||
14057             tg3_flag(tp, 5780_CLASS) ||
14058             tg3_flag(tp, USE_JUMBO_BDFLAG))
14059                 tg3_flag_set(tp, JUMBO_CAPABLE);
14060
14061         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14062                               &pci_state_reg);
14063
14064         if (pci_is_pcie(tp->pdev)) {
14065                 u16 lnkctl;
14066
14067                 tg3_flag_set(tp, PCI_EXPRESS);
14068
14069                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14070                         int readrq = pcie_get_readrq(tp->pdev);
14071                         if (readrq > 2048)
14072                                 pcie_set_readrq(tp->pdev, 2048);
14073                 }
14074
14075                 pci_read_config_word(tp->pdev,
14076                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14077                                      &lnkctl);
14078                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14079                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14080                             ASIC_REV_5906) {
14081                                 tg3_flag_clear(tp, HW_TSO_2);
14082                                 tg3_flag_clear(tp, TSO_CAPABLE);
14083                         }
14084                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14085                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14086                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14087                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14088                                 tg3_flag_set(tp, CLKREQ_BUG);
14089                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14090                         tg3_flag_set(tp, L1PLLPD_EN);
14091                 }
14092         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14093                 /* BCM5785 devices are effectively PCIe devices, and should
14094                  * follow PCIe codepaths, but do not have a PCIe capabilities
14095                  * section.
14096                  */
14097                 tg3_flag_set(tp, PCI_EXPRESS);
14098         } else if (!tg3_flag(tp, 5705_PLUS) ||
14099                    tg3_flag(tp, 5780_CLASS)) {
14100                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14101                 if (!tp->pcix_cap) {
14102                         dev_err(&tp->pdev->dev,
14103                                 "Cannot find PCI-X capability, aborting\n");
14104                         return -EIO;
14105                 }
14106
14107                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14108                         tg3_flag_set(tp, PCIX_MODE);
14109         }
14110
14111         /* If we have an AMD 762 or VIA K8T800 chipset, write
14112          * reordering to the mailbox registers done by the host
14113          * controller can cause major troubles.  We read back from
14114          * every mailbox register write to force the writes to be
14115          * posted to the chip in order.
14116          */
14117         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14118             !tg3_flag(tp, PCI_EXPRESS))
14119                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14120
14121         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14122                              &tp->pci_cacheline_sz);
14123         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14124                              &tp->pci_lat_timer);
14125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14126             tp->pci_lat_timer < 64) {
14127                 tp->pci_lat_timer = 64;
14128                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14129                                       tp->pci_lat_timer);
14130         }
14131
14132         /* Important! -- It is critical that the PCI-X hw workaround
14133          * situation is decided before the first MMIO register access.
14134          */
14135         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14136                 /* 5700 BX chips need to have their TX producer index
14137                  * mailboxes written twice to workaround a bug.
14138                  */
14139                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14140
14141                 /* If we are in PCI-X mode, enable register write workaround.
14142                  *
14143                  * The workaround is to use indirect register accesses
14144                  * for all chip writes not to mailbox registers.
14145                  */
14146                 if (tg3_flag(tp, PCIX_MODE)) {
14147                         u32 pm_reg;
14148
14149                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14150
                        /* The chip can have its power management PCI config
14152                          * space registers clobbered due to this bug.
14153                          * So explicitly force the chip into D0 here.
14154                          */
14155                         pci_read_config_dword(tp->pdev,
14156                                               tp->pm_cap + PCI_PM_CTRL,
14157                                               &pm_reg);
14158                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14159                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14160                         pci_write_config_dword(tp->pdev,
14161                                                tp->pm_cap + PCI_PM_CTRL,
14162                                                pm_reg);
14163
14164                         /* Also, force SERR#/PERR# in PCI command. */
14165                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14166                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14167                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14168                 }
14169         }
14170
14171         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14172                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14173         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14174                 tg3_flag_set(tp, PCI_32BIT);
14175
14176         /* Chip-specific fixup from Broadcom driver */
14177         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14178             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14179                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14180                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14181         }
14182
14183         /* Default fast path register access methods */
14184         tp->read32 = tg3_read32;
14185         tp->write32 = tg3_write32;
14186         tp->read32_mbox = tg3_read32;
14187         tp->write32_mbox = tg3_write32;
14188         tp->write32_tx_mbox = tg3_write32;
14189         tp->write32_rx_mbox = tg3_write32;
14190
14191         /* Various workaround register access methods */
14192         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14193                 tp->write32 = tg3_write_indirect_reg32;
14194         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14195                  (tg3_flag(tp, PCI_EXPRESS) &&
14196                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14197                 /*
14198                  * Back to back register writes can cause problems on these
14199                  * chips, the workaround is to read back all reg writes
14200                  * except those to mailbox regs.
14201                  *
14202                  * See tg3_write_indirect_reg32().
14203                  */
14204                 tp->write32 = tg3_write_flush_reg32;
14205         }
14206
14207         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14208                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14209                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14210                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14211         }
14212
14213         if (tg3_flag(tp, ICH_WORKAROUND)) {
14214                 tp->read32 = tg3_read_indirect_reg32;
14215                 tp->write32 = tg3_write_indirect_reg32;
14216                 tp->read32_mbox = tg3_read_indirect_mbox;
14217                 tp->write32_mbox = tg3_write_indirect_mbox;
14218                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14219                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14220
14221                 iounmap(tp->regs);
14222                 tp->regs = NULL;
14223
14224                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14225                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14226                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14227         }
14228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14229                 tp->read32_mbox = tg3_read32_mbox_5906;
14230                 tp->write32_mbox = tg3_write32_mbox_5906;
14231                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14232                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14233         }
14234
14235         if (tp->write32 == tg3_write_indirect_reg32 ||
14236             (tg3_flag(tp, PCIX_MODE) &&
14237              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14238               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14239                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14240
14241         /* The memory arbiter has to be enabled in order for SRAM accesses
14242          * to succeed.  Normally on powerup the tg3 chip firmware will make
14243          * sure it is enabled, but other entities such as system netboot
14244          * code might disable it.
14245          */
14246         val = tr32(MEMARB_MODE);
14247         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14248
14249         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14250         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14251             tg3_flag(tp, 5780_CLASS)) {
14252                 if (tg3_flag(tp, PCIX_MODE)) {
14253                         pci_read_config_dword(tp->pdev,
14254                                               tp->pcix_cap + PCI_X_STATUS,
14255                                               &val);
14256                         tp->pci_fn = val & 0x7;
14257                 }
14258         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14259                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14260                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14261                     NIC_SRAM_CPMUSTAT_SIG) {
14262                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14263                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14264                 }
14265         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14266                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14267                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14268                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14269                     NIC_SRAM_CPMUSTAT_SIG) {
14270                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14271                                      TG3_CPMU_STATUS_FSHFT_5719;
14272                 }
14273         }
14274
14275         /* Get eeprom hw config before calling tg3_set_power_state().
14276          * In particular, the TG3_FLAG_IS_NIC flag must be
14277          * determined before calling tg3_set_power_state() so that
14278          * we know whether or not to switch out of Vaux power.
14279          * When the flag is set, it means that GPIO1 is used for eeprom
14280          * write protect and also implies that it is a LOM where GPIOs
14281          * are not used to switch power.
14282          */
14283         tg3_get_eeprom_hw_cfg(tp);
14284
14285         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14286                 tg3_flag_clear(tp, TSO_CAPABLE);
14287                 tg3_flag_clear(tp, TSO_BUG);
14288                 tp->fw_needed = NULL;
14289         }
14290
14291         if (tg3_flag(tp, ENABLE_APE)) {
14292                 /* Allow reads and writes to the
14293                  * APE register and memory space.
14294                  */
14295                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14296                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14297                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14298                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14299                                        pci_state_reg);
14300
14301                 tg3_ape_lock_init(tp);
14302         }
14303
14304         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14305             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14306             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14308             tg3_flag(tp, 57765_PLUS))
14309                 tg3_flag_set(tp, CPMU_PRESENT);
14310
14311         /* Set up tp->grc_local_ctrl before calling
14312          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14313          * will bring 5700's external PHY out of reset.
14314          * It is also used as eeprom write protect on LOMs.
14315          */
14316         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14318             tg3_flag(tp, EEPROM_WRITE_PROT))
14319                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14320                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14321         /* Unused GPIO3 must be driven as output on 5752 because there
14322          * are no pull-up resistors on unused GPIO pins.
14323          */
14324         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14325                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14326
14327         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14328             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14329             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14330                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14331
14332         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14333             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14334                 /* Turn off the debug UART. */
14335                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14336                 if (tg3_flag(tp, IS_NIC))
14337                         /* Keep VMain power. */
14338                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14339                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14340         }
14341
14342         /* Switch out of Vaux if it is a NIC */
14343         tg3_pwrsrc_switch_to_vmain(tp);
14344
14345         /* Derive initial jumbo mode from MTU assigned in
14346          * ether_setup() via the alloc_etherdev() call
14347          */
14348         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14349                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14350
14351         /* Determine WakeOnLan speed to use. */
14352         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14353             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14354             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14355             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14356                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14357         } else {
14358                 tg3_flag_set(tp, WOL_SPEED_100MB);
14359         }
14360
14361         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14362                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14363
14364         /* A few boards don't want Ethernet@WireSpeed phy feature */
14365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14366             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14367              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14368              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14369             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14370             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14371                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14372
14373         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14374             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14375                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14376         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14377                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14378
14379         if (tg3_flag(tp, 5705_PLUS) &&
14380             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14381             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14382             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14383             !tg3_flag(tp, 57765_PLUS)) {
14384                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14385                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14387                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14388                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14389                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14390                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14391                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14392                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14393                 } else
14394                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14395         }
14396
14397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14398             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14399                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14400                 if (tp->phy_otp == 0)
14401                         tp->phy_otp = TG3_OTP_DEFAULT;
14402         }
14403
14404         if (tg3_flag(tp, CPMU_PRESENT))
14405                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14406         else
14407                 tp->mi_mode = MAC_MI_MODE_BASE;
14408
14409         tp->coalesce_mode = 0;
14410         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14411             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14412                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14413
14414         /* Set these bits to enable statistics workaround. */
14415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14416             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14417             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14418                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14419                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14420         }
14421
14422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14423             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14424                 tg3_flag_set(tp, USE_PHYLIB);
14425
14426         err = tg3_mdio_init(tp);
14427         if (err)
14428                 return err;
14429
14430         /* Initialize data/descriptor byte/word swapping. */
14431         val = tr32(GRC_MODE);
14432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14433                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14434                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14435                         GRC_MODE_B2HRX_ENABLE |
14436                         GRC_MODE_HTX2B_ENABLE |
14437                         GRC_MODE_HOST_STACKUP);
14438         else
14439                 val &= GRC_MODE_HOST_STACKUP;
14440
14441         tw32(GRC_MODE, val | tp->grc_mode);
14442
14443         tg3_switch_clocks(tp);
14444
14445         /* Clear this out for sanity. */
14446         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14447
14448         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14449                               &pci_state_reg);
14450         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14451             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14452                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14453
14454                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14455                     chiprevid == CHIPREV_ID_5701_B0 ||
14456                     chiprevid == CHIPREV_ID_5701_B2 ||
14457                     chiprevid == CHIPREV_ID_5701_B5) {
14458                         void __iomem *sram_base;
14459
14460                         /* Write some dummy words into the SRAM status block
14461                          * area, see if it reads back correctly.  If the return
14462                          * value is bad, force enable the PCIX workaround.
14463                          */
14464                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14465
14466                         writel(0x00000000, sram_base);
14467                         writel(0x00000000, sram_base + 4);
14468                         writel(0xffffffff, sram_base + 4);
14469                         if (readl(sram_base) != 0x00000000)
14470                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14471                 }
14472         }
14473
14474         udelay(50);
14475         tg3_nvram_init(tp);
14476
14477         grc_misc_cfg = tr32(GRC_MISC_CFG);
14478         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14479
14480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14481             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14482              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14483                 tg3_flag_set(tp, IS_5788);
14484
14485         if (!tg3_flag(tp, IS_5788) &&
14486             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14487                 tg3_flag_set(tp, TAGGED_STATUS);
14488         if (tg3_flag(tp, TAGGED_STATUS)) {
14489                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14490                                       HOSTCC_MODE_CLRTICK_TXBD);
14491
14492                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14493                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14494                                        tp->misc_host_ctrl);
14495         }
14496
14497         /* Preserve the APE MAC_MODE bits */
14498         if (tg3_flag(tp, ENABLE_APE))
14499                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14500         else
14501                 tp->mac_mode = 0;
14502
14503         /* these are limited to 10/100 only */
14504         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14505              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14506             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14507              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14508              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14509               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14510               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14511             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14512              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14513               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14514               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14515             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14516             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14517             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14518             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14519                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14520
14521         err = tg3_phy_probe(tp);
14522         if (err) {
14523                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14524                 /* ... but do not return immediately ... */
14525                 tg3_mdio_fini(tp);
14526         }
14527
14528         tg3_read_vpd(tp);
14529         tg3_read_fw_ver(tp);
14530
14531         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14532                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14533         } else {
14534                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14535                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14536                 else
14537                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14538         }
14539
14540         /* 5700 {AX,BX} chips have a broken status block link
14541          * change bit implementation, so we must use the
14542          * status register in those cases.
14543          */
14544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14545                 tg3_flag_set(tp, USE_LINKCHG_REG);
14546         else
14547                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14548
14549         /* The led_ctrl is set during tg3_phy_probe, here we might
14550          * have to force the link status polling mechanism based
14551          * upon subsystem IDs.
14552          */
14553         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14555             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14556                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14557                 tg3_flag_set(tp, USE_LINKCHG_REG);
14558         }
14559
14560         /* For all SERDES we poll the MAC status register. */
14561         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14562                 tg3_flag_set(tp, POLL_SERDES);
14563         else
14564                 tg3_flag_clear(tp, POLL_SERDES);
14565
14566         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14567         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14569             tg3_flag(tp, PCIX_MODE)) {
14570                 tp->rx_offset = NET_SKB_PAD;
14571 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14572                 tp->rx_copy_thresh = ~(u16)0;
14573 #endif
14574         }
14575
14576         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14577         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14578         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14579
14580         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14581
14582         /* Increment the rx prod index on the rx std ring by at most
14583          * 8 for these chips to workaround hw errata.
14584          */
14585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14586             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14588                 tp->rx_std_max_post = 8;
14589
14590         if (tg3_flag(tp, ASPM_WORKAROUND))
14591                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14592                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14593
14594         return err;
14595 }
14596
14597 #ifdef CONFIG_SPARC
14598 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14599 {
14600         struct net_device *dev = tp->dev;
14601         struct pci_dev *pdev = tp->pdev;
14602         struct device_node *dp = pci_device_to_OF_node(pdev);
14603         const unsigned char *addr;
14604         int len;
14605
14606         addr = of_get_property(dp, "local-mac-address", &len);
14607         if (addr && len == 6) {
14608                 memcpy(dev->dev_addr, addr, 6);
14609                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14610                 return 0;
14611         }
14612         return -ENODEV;
14613 }
14614
14615 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14616 {
14617         struct net_device *dev = tp->dev;
14618
14619         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14620         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14621         return 0;
14622 }
14623 #endif
14624
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust: Open Firmware (sparc), the SRAM mailbox written by
 * bootcode, NVRAM, and finally the MAC address registers themselves.
 * On success the address is copied into dev->dev_addr/perm_addr and 0
 * is returned; -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts keep the second MAC's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ devices: per-PCI-function address offsets. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b ("HK") in the upper half marks a mailbox the bootcode
	 * has populated.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds bytes 0-1 in its low half, lo bytes 2-5. */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14700
/* DMA boundary goals used by tg3_calc_dma_bndry(): break bursts at
 * every cache line, or only at multi-cache-line boundaries.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
14703
/* Compute the DMA read/write boundary bits for the DMA_RW_CTRL
 * register based on the PCI cache line size, bus type, and host
 * architecture.
 * @val: current DMA_RW_CTRL value to merge the boundary bits into.
 * Returns the updated register value.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* A zero cache-line-size register is treated as 1024 bytes. */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Select the boundary policy per architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only exposes write-boundary control. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: cases fall through until the
		 * boundary reaches the cache line size.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14844
/* Run one DMA transfer of @size bytes between the host buffer at
 * @buf_dma and NIC-internal memory, using the chip's internal DMA
 * descriptor mechanism, as a self-test.
 * @to_device: non-zero runs a read DMA (host -> NIC), zero a write
 * DMA (NIC -> host).
 * Returns 0 if the completion is observed within the poll window,
 * -ENODEV otherwise.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor for the host buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO, 40 tries x 100us. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14924
/* Size of the coherent buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA test passes; tg3_test_dma() forces the 16-byte write
 * boundary when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14931
/* Tune TG3PCI_DMA_RW_CTRL (watermarks, boundaries, chip-specific
 * workaround bits) for the host bus, then on 5700/5701 run a loopback
 * DMA test to detect the write-DMA corruption erratum, falling back
 * to a 16-byte write boundary if corruption is seen.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI write/read command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermarks by ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 have the write-DMA erratum; skip the DMA test
	 * on everything else.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption: retry once with the 16-byte
			 * write boundary before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15121
15122 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15123 {
15124         if (tg3_flag(tp, 57765_PLUS)) {
15125                 tp->bufmgr_config.mbuf_read_dma_low_water =
15126                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15127                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15128                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15129                 tp->bufmgr_config.mbuf_high_water =
15130                         DEFAULT_MB_HIGH_WATER_57765;
15131
15132                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15133                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15134                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15135                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15136                 tp->bufmgr_config.mbuf_high_water_jumbo =
15137                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15138         } else if (tg3_flag(tp, 5705_PLUS)) {
15139                 tp->bufmgr_config.mbuf_read_dma_low_water =
15140                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15141                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15142                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15143                 tp->bufmgr_config.mbuf_high_water =
15144                         DEFAULT_MB_HIGH_WATER_5705;
15145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15146                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15147                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15148                         tp->bufmgr_config.mbuf_high_water =
15149                                 DEFAULT_MB_HIGH_WATER_5906;
15150                 }
15151
15152                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15153                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15154                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15155                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15156                 tp->bufmgr_config.mbuf_high_water_jumbo =
15157                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15158         } else {
15159                 tp->bufmgr_config.mbuf_read_dma_low_water =
15160                         DEFAULT_MB_RDMA_LOW_WATER;
15161                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15162                         DEFAULT_MB_MACRX_LOW_WATER;
15163                 tp->bufmgr_config.mbuf_high_water =
15164                         DEFAULT_MB_HIGH_WATER;
15165
15166                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15167                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15168                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15169                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15170                 tp->bufmgr_config.mbuf_high_water_jumbo =
15171                         DEFAULT_MB_HIGH_WATER_JUMBO;
15172         }
15173
15174         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15175         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15176 }
15177
15178 static char * __devinit tg3_phy_string(struct tg3 *tp)
15179 {
15180         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15181         case TG3_PHY_ID_BCM5400:        return "5400";
15182         case TG3_PHY_ID_BCM5401:        return "5401";
15183         case TG3_PHY_ID_BCM5411:        return "5411";
15184         case TG3_PHY_ID_BCM5701:        return "5701";
15185         case TG3_PHY_ID_BCM5703:        return "5703";
15186         case TG3_PHY_ID_BCM5704:        return "5704";
15187         case TG3_PHY_ID_BCM5705:        return "5705";
15188         case TG3_PHY_ID_BCM5750:        return "5750";
15189         case TG3_PHY_ID_BCM5752:        return "5752";
15190         case TG3_PHY_ID_BCM5714:        return "5714";
15191         case TG3_PHY_ID_BCM5780:        return "5780";
15192         case TG3_PHY_ID_BCM5755:        return "5755";
15193         case TG3_PHY_ID_BCM5787:        return "5787";
15194         case TG3_PHY_ID_BCM5784:        return "5784";
15195         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15196         case TG3_PHY_ID_BCM5906:        return "5906";
15197         case TG3_PHY_ID_BCM5761:        return "5761";
15198         case TG3_PHY_ID_BCM5718C:       return "5718C";
15199         case TG3_PHY_ID_BCM5718S:       return "5718S";
15200         case TG3_PHY_ID_BCM57765:       return "57765";
15201         case TG3_PHY_ID_BCM5719C:       return "5719C";
15202         case TG3_PHY_ID_BCM5720C:       return "5720C";
15203         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15204         case 0:                 return "serdes";
15205         default:                return "unknown";
15206         }
15207 }
15208
15209 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15210 {
15211         if (tg3_flag(tp, PCI_EXPRESS)) {
15212                 strcpy(str, "PCI Express");
15213                 return str;
15214         } else if (tg3_flag(tp, PCIX_MODE)) {
15215                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15216
15217                 strcpy(str, "PCIX:");
15218
15219                 if ((clock_ctrl == 7) ||
15220                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15221                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15222                         strcat(str, "133MHz");
15223                 else if (clock_ctrl == 0)
15224                         strcat(str, "33MHz");
15225                 else if (clock_ctrl == 2)
15226                         strcat(str, "50MHz");
15227                 else if (clock_ctrl == 4)
15228                         strcat(str, "66MHz");
15229                 else if (clock_ctrl == 6)
15230                         strcat(str, "100MHz");
15231         } else {
15232                 strcpy(str, "PCI:");
15233                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15234                         strcat(str, "66MHz");
15235                 else
15236                         strcat(str, "33MHz");
15237         }
15238         if (tg3_flag(tp, PCI_32BIT))
15239                 strcat(str, ":32-bit");
15240         else
15241                 strcat(str, ":64-bit");
15242         return str;
15243 }
15244
/* Find the other PCI function of a dual-port device (e.g. 5704).
 * Returns the peer's pci_dev, or tp->pdev itself when the device is
 * configured in single-port mode.  The returned pointer carries no
 * extra reference; see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Empty slot or our own function: drop the reference
		 * pci_get_slot() took (pci_dev_put(NULL) is a no-op).
		 */
		pci_dev_put(peer);
	}
	/* NOTE(review): if the loop falls through without breaking,
	 * peer holds the func==7 lookup result whose reference was
	 * already dropped -- presumably always NULL or tp->pdev at
	 * that point; verify before relying on the extra put below.
	 */
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15272
15273 static void __devinit tg3_init_coal(struct tg3 *tp)
15274 {
15275         struct ethtool_coalesce *ec = &tp->coal;
15276
15277         memset(ec, 0, sizeof(*ec));
15278         ec->cmd = ETHTOOL_GCOALESCE;
15279         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15280         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15281         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15282         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15283         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15284         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15285         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15286         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15287         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15288
15289         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15290                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15291                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15292                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15293                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15294                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15295         }
15296
15297         if (tg3_flag(tp, 5705_PLUS)) {
15298                 ec->rx_coalesce_usecs_irq = 0;
15299                 ec->tx_coalesce_usecs_irq = 0;
15300                 ec->stats_block_coalesce_usecs = 0;
15301         }
15302 }
15303
/* net_device callbacks handed to the networking core at probe time
 * (installed on dev->netdev_ops in tg3_init_one()).
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open               = tg3_open,
	.ndo_stop               = tg3_close,
	.ndo_start_xmit         = tg3_start_xmit,
	.ndo_get_stats64        = tg3_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = tg3_set_rx_mode,
	.ndo_set_mac_address    = tg3_set_mac_addr,
	.ndo_do_ioctl           = tg3_ioctl,
	.ndo_tx_timeout         = tg3_tx_timeout,
	.ndo_change_mtu         = tg3_change_mtu,
	.ndo_fix_features       = tg3_fix_features,
	.ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = tg3_poll_controller,
#endif
};
15321
/* tg3_init_one - PCI probe callback for Tigon3 devices.
 * @pdev: the PCI device to bring up
 * @ent: matching entry from tg3_pci_tbl (unused here)
 *
 * Enables and maps the device, discovers chip capabilities, selects the
 * DMA masks, configures offload features and the per-vector mailbox
 * registers, then registers the net_device.  Returns 0 on success or a
 * negative errno; failures unwind through the err_out_* labels below.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These devices carry an Application Processing Engine (APE)
	 * whose registers live behind BAR 2.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to 32-bit DMA when the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	/* Note: loopback was added after dev->features was assigned above,
	 * so it lands in hw_features only (user-togglable, off by default).
	 */
	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, rx-consumer and tx-producer mailbox registers
	 * to each NAPI vector.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* The first five interrupt mailboxes are spaced 8 bytes
		 * apart, the remainder 4 bytes apart (register layout).
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for restore after AER slot reset. */
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15699
15700 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15701 {
15702         struct net_device *dev = pci_get_drvdata(pdev);
15703
15704         if (dev) {
15705                 struct tg3 *tp = netdev_priv(dev);
15706
15707                 if (tp->fw)
15708                         release_firmware(tp->fw);
15709
15710                 tg3_reset_task_cancel(tp);
15711
15712                 if (tg3_flag(tp, USE_PHYLIB)) {
15713                         tg3_phy_fini(tp);
15714                         tg3_mdio_fini(tp);
15715                 }
15716
15717                 unregister_netdev(dev);
15718                 if (tp->aperegs) {
15719                         iounmap(tp->aperegs);
15720                         tp->aperegs = NULL;
15721                 }
15722                 if (tp->regs) {
15723                         iounmap(tp->regs);
15724                         tp->regs = NULL;
15725                 }
15726                 free_netdev(dev);
15727                 pci_release_regions(pdev);
15728                 pci_disable_device(pdev);
15729                 pci_set_drvdata(pdev, NULL);
15730         }
15731 }
15732
15733 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler.
 *
 * Quiesces the device before the system enters a low-power state: stops
 * the PHY, NAPI and the periodic timer, disables interrupts, detaches
 * the net_device and halts the chip.  If preparing for power-down fails,
 * the hardware is restarted so the interface stays usable, and the
 * error is propagated to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* An interface that is down needs no quiescing. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down preparation failed: undo the shutdown so the
		 * interface keeps working, then report the original error.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the device locks. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15787
15788 static int tg3_resume(struct device *device)
15789 {
15790         struct pci_dev *pdev = to_pci_dev(device);
15791         struct net_device *dev = pci_get_drvdata(pdev);
15792         struct tg3 *tp = netdev_priv(dev);
15793         int err;
15794
15795         if (!netif_running(dev))
15796                 return 0;
15797
15798         netif_device_attach(dev);
15799
15800         tg3_full_lock(tp, 0);
15801
15802         tg3_flag_set(tp, INIT_COMPLETE);
15803         err = tg3_restart_hw(tp, 1);
15804         if (err)
15805                 goto out;
15806
15807         tp->timer.expires = jiffies + tp->timer_offset;
15808         add_timer(&tp->timer);
15809
15810         tg3_netif_start(tp);
15811
15812 out:
15813         tg3_full_unlock(tp);
15814
15815         if (!err)
15816                 tg3_phy_start(tp);
15817
15818         return err;
15819 }
15820
/* PM callbacks are only wired up when the kernel supports system sleep;
 * otherwise the driver registers no pm ops at all.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15829
15830 /**
15831  * tg3_io_error_detected - called when PCI error is detected
15832  * @pdev: Pointer to PCI device
15833  * @state: The current pci connection state
15834  *
15835  * This function is called after a PCI bus error affecting
15836  * this device has been detected.
15837  */
15838 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15839                                               pci_channel_state_t state)
15840 {
15841         struct net_device *netdev = pci_get_drvdata(pdev);
15842         struct tg3 *tp = netdev_priv(netdev);
15843         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15844
15845         netdev_info(netdev, "PCI I/O error detected\n");
15846
15847         rtnl_lock();
15848
15849         if (!netif_running(netdev))
15850                 goto done;
15851
15852         tg3_phy_stop(tp);
15853
15854         tg3_netif_stop(tp);
15855
15856         del_timer_sync(&tp->timer);
15857
15858         /* Want to make sure that the reset task doesn't run */
15859         tg3_reset_task_cancel(tp);
15860         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15861
15862         netif_device_detach(netdev);
15863
15864         /* Clean up software state, even if MMIO is blocked */
15865         tg3_full_lock(tp, 0);
15866         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15867         tg3_full_unlock(tp);
15868
15869 done:
15870         if (state == pci_channel_io_perm_failure)
15871                 err = PCI_ERS_RESULT_DISCONNECT;
15872         else
15873                 pci_disable_device(pdev);
15874
15875         rtnl_unlock();
15876
15877         return err;
15878 }
15879
15880 /**
15881  * tg3_io_slot_reset - called after the pci bus has been reset.
15882  * @pdev: Pointer to PCI device
15883  *
15884  * Restart the card from scratch, as if from a cold-boot.
15885  * At this point, the card has exprienced a hard reset,
15886  * followed by fixups by BIOS, and has its config space
15887  * set up identically to what it was at cold boot.
15888  */
15889 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15890 {
15891         struct net_device *netdev = pci_get_drvdata(pdev);
15892         struct tg3 *tp = netdev_priv(netdev);
15893         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15894         int err;
15895
15896         rtnl_lock();
15897
15898         if (pci_enable_device(pdev)) {
15899                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15900                 goto done;
15901         }
15902
15903         pci_set_master(pdev);
15904         pci_restore_state(pdev);
15905         pci_save_state(pdev);
15906
15907         if (!netif_running(netdev)) {
15908                 rc = PCI_ERS_RESULT_RECOVERED;
15909                 goto done;
15910         }
15911
15912         err = tg3_power_up(tp);
15913         if (err)
15914                 goto done;
15915
15916         rc = PCI_ERS_RESULT_RECOVERED;
15917
15918 done:
15919         rtnl_unlock();
15920
15921         return rc;
15922 }
15923
15924 /**
15925  * tg3_io_resume - called when traffic can start flowing again.
15926  * @pdev: Pointer to PCI device
15927  *
15928  * This callback is called when the error recovery driver tells
15929  * us that its OK to resume normal operation.
15930  */
15931 static void tg3_io_resume(struct pci_dev *pdev)
15932 {
15933         struct net_device *netdev = pci_get_drvdata(pdev);
15934         struct tg3 *tp = netdev_priv(netdev);
15935         int err;
15936
15937         rtnl_lock();
15938
15939         if (!netif_running(netdev))
15940                 goto done;
15941
15942         tg3_full_lock(tp, 0);
15943         tg3_flag_set(tp, INIT_COMPLETE);
15944         err = tg3_restart_hw(tp, 1);
15945         tg3_full_unlock(tp);
15946         if (err) {
15947                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15948                 goto done;
15949         }
15950
15951         netif_device_attach(netdev);
15952
15953         tp->timer.expires = jiffies + tp->timer_offset;
15954         add_timer(&tp->timer);
15955
15956         tg3_netif_start(tp);
15957
15958         tg3_phy_start(tp);
15959
15960 done:
15961         rtnl_unlock();
15962 }
15963
/* PCI error-recovery (AER) hooks registered via tg3_driver below. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
15969
/* Driver registration record: ties the PCI ID table to the probe,
 * remove, error-recovery and power-management callbacks.
 */
static struct pci_driver tg3_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = tg3_pci_tbl,
	.probe          = tg3_init_one,
	.remove         = __devexit_p(tg3_remove_one),
	.err_handler    = &tg3_err_handler,
	.driver.pm      = TG3_PM_OPS,
};
15978
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);