2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
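/* Illustrative sketch (editorial, not part of the original driver): the
 * accessors above let feature tests read as tg3_flag(tp, FOO) while the
 * enum values stay in a plain bitmap.  A hypothetical helper using them
 * might look like this; tg3_example_supports_jumbo() is invented purely as
 * an example, and struct tg3 comes from the driver header included above.
 */
static inline bool tg3_example_supports_jumbo(struct tg3 *tp)
{
	/* Expands to test_bit(TG3_FLAG_JUMBO_CAPABLE, tp->tg3_flags). */
	return tg3_flag(tp, JUMBO_CAPABLE) != 0;
}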
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 121
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "November 2, 2011"
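/* For reference (editorial): __stringify() turns the numeric version macros
 * into string literals at preprocessing time, so the expansion is
 * __stringify(TG3_MAJ_NUM) "." "121", and adjacent string literals are
 * concatenated into a single "<major>.121" version string.  (TG3_MAJ_NUM is
 * defined elsewhere in the original file and is not shown here.)
 */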
97 #define RESET_KIND_SHUTDOWN 0
98 #define RESET_KIND_INIT 1
99 #define RESET_KIND_SUSPEND 2
101 #define TG3_DEF_RX_MODE 0
102 #define TG3_DEF_TX_MODE 0
103 #define TG3_DEF_MSG_ENABLE \
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115 /* length of time before we decide the hardware is borked,
116 * and dev->tx_timeout() should be called to fix the problem
119 #define TG3_TX_TIMEOUT (5 * HZ)
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU 60
123 #define TG3_MAX_MTU(tp) \
124 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127 * You can't change the ring sizes, but you can change where you place
128 * them in the NIC onboard memory.
130 #define TG3_RX_STD_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING 200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 #define TG3_RSS_INDIR_TBL_SIZE 128
140 /* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
147 #define TG3_TX_RING_SIZE 512
148 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
150 #define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
158 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
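/* Worked example of the comment above (editorial sketch, not part of the
 * original driver): because TG3_TX_RING_SIZE is 512, a power of two,
 * '& (TG3_TX_RING_SIZE - 1)' is equivalent to '% TG3_TX_RING_SIZE' but
 * compiles to a single AND.  NEXT_TX() above is exactly that transformation;
 * e.g. NEXT_TX(511) == (512 & 511) == 0, so the index wraps without a
 * hardware divide.
 */
static inline u32 tg3_example_next_tx(u32 idx)
{
	return NEXT_TX(idx);	/* (idx + 1) & 511 */
}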
160 #define TG3_DMA_BYTE_ENAB 64
162 #define TG3_RX_STD_DMA_SZ 1536
163 #define TG3_RX_JMB_DMA_SZ 9046
165 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
167 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
187 #define TG3_RX_COPY_THRESHOLD 256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
197 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
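/* Editorial sketch of how the macros above are meant to be used on the
 * receive path ("len" and the comments are illustrative, not the driver's
 * actual local variables): small frames are copied into a freshly allocated
 * buffer so the large DMA buffer can be recycled, while frames above the
 * threshold are passed up directly.
 *
 *	if (len > TG3_RX_COPY_THRESH(tp)) {
 *		// hand the original DMA buffer to the stack
 *	} else {
 *		// copy the frame into a small skb and reuse the DMA buffer
 *	}
 *
 * On the 5701 in PCI-X mode the copy path also doubles as the workaround
 * for the alignment bug described above.
 */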
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX 4096
204 #define TG3_RAW_IP_ALIGN 2
206 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
208 #define FIRMWARE_TG3 "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
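/* Usage note (editorial): the debug mask can be set at module load time,
 * e.g.
 *
 *	modprobe tg3 tg3_debug=0x7fff
 *
 * while the default of -1 tells the driver to fall back to
 * TG3_DEF_MSG_ENABLE, as noted in the comment above.
 */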
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314 static const struct {
315 const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
319 { "rx_ucast_packets" },
320 { "rx_mcast_packets" },
321 { "rx_bcast_packets" },
323 { "rx_align_errors" },
324 { "rx_xon_pause_rcvd" },
325 { "rx_xoff_pause_rcvd" },
326 { "rx_mac_ctrl_rcvd" },
327 { "rx_xoff_entered" },
328 { "rx_frame_too_long_errors" },
330 { "rx_undersize_packets" },
331 { "rx_in_length_errors" },
332 { "rx_out_length_errors" },
333 { "rx_64_or_less_octet_packets" },
334 { "rx_65_to_127_octet_packets" },
335 { "rx_128_to_255_octet_packets" },
336 { "rx_256_to_511_octet_packets" },
337 { "rx_512_to_1023_octet_packets" },
338 { "rx_1024_to_1522_octet_packets" },
339 { "rx_1523_to_2047_octet_packets" },
340 { "rx_2048_to_4095_octet_packets" },
341 { "rx_4096_to_8191_octet_packets" },
342 { "rx_8192_to_9022_octet_packets" },
349 { "tx_flow_control" },
351 { "tx_single_collisions" },
352 { "tx_mult_collisions" },
354 { "tx_excessive_collisions" },
355 { "tx_late_collisions" },
356 { "tx_collide_2times" },
357 { "tx_collide_3times" },
358 { "tx_collide_4times" },
359 { "tx_collide_5times" },
360 { "tx_collide_6times" },
361 { "tx_collide_7times" },
362 { "tx_collide_8times" },
363 { "tx_collide_9times" },
364 { "tx_collide_10times" },
365 { "tx_collide_11times" },
366 { "tx_collide_12times" },
367 { "tx_collide_13times" },
368 { "tx_collide_14times" },
369 { "tx_collide_15times" },
370 { "tx_ucast_packets" },
371 { "tx_mcast_packets" },
372 { "tx_bcast_packets" },
373 { "tx_carrier_sense_errors" },
377 { "dma_writeq_full" },
378 { "dma_write_prioq_full" },
382 { "rx_threshold_hit" },
384 { "dma_readq_full" },
385 { "dma_read_prioq_full" },
386 { "tx_comp_queue_full" },
388 { "ring_set_send_prod_index" },
389 { "ring_status_update" },
391 { "nic_avoided_irqs" },
392 { "nic_tx_threshold_hit" },
394 { "mbuf_lwm_thresh_hit" },
397 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
400 static const struct {
401 const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 { "nvram test (online) " },
404 { "link test (online) " },
405 { "register test (offline)" },
406 { "memory test (offline)" },
407 { "mac loopback test (offline)" },
408 { "phy loopback test (offline)" },
409 { "ext loopback test (offline)" },
410 { "interrupt test (offline)" },
413 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
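/* Usage note (editorial): these string tables are what userspace sees when
 * it queries the driver through ethtool, e.g.
 *
 *	ethtool -S eth0          # one line per ethtool_stats_keys[] entry
 *	ethtool -t eth0 offline  # runs the tests named in ethtool_test_keys[]
 *
 * "eth0" is just a placeholder interface name.
 */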
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 writel(val, tp->regs + off);
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 return readl(tp->regs + off);
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 writel(val, tp->aperegs + off);
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 return readl(tp->aperegs + off);
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440 spin_lock_irqsave(&tp->indirect_lock, flags);
441 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 spin_unlock_irqrestore(&tp->indirect_lock, flags);
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 writel(val, tp->regs + off);
449 readl(tp->regs + off);
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 TG3_64BIT_REG_LOW, val);
473 if (off == TG3_RX_STD_PROD_IDX_REG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 TG3_64BIT_REG_LOW, val);
479 spin_lock_irqsave(&tp->indirect_lock, flags);
480 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 /* In indirect mode when disabling interrupts, we also need
485 * to clear the interrupt bit in the GRC local ctrl register.
487 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
513 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 /* Non-posted methods */
515 tp->write32(tp, off, val);
518 tg3_write32(tp, off, val);
523 /* Wait again after the read for the posted method to guarantee that
524 * the wait time is met.
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 tp->write32_mbox(tp, off, val);
533 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 tp->read32_mbox(tp, off);
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
539 void __iomem *mbox = tp->regs + off;
541 if (tg3_flag(tp, TXD_MBOX_HWBUG))
543 if (tg3_flag(tp, MBOX_WRITE_REORDER))
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 return readl(tp->regs + off + GRCMBOX_BASE);
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 writel(val, tp->regs + off + GRCMBOX_BASE);
557 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
563 #define tw32(reg, val) tp->write32(tp, reg, val)
564 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg) tp->read32(tp, reg)
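/* Editorial sketch of the accessor layering defined above: tw32()/tr32()
 * go through the function pointers selected at probe time (direct MMIO,
 * indirect config-space access, etc.), tw32_f() additionally reads the
 * register back to flush the posted write, and tw32_wait_f() adds a delay
 * for registers that must not be read back immediately.  For example, a
 * GPIO power switch toggle might be written as:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which matches the GRC_LOCAL_CTRL case called out in the comment before
 * _tw32_flush().
 */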
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
576 spin_lock_irqsave(&tp->indirect_lock, flags);
577 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
581 /* Always leave this as zero. */
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 tw32_f(TG3PCI_MEM_WIN_DATA, val);
587 /* Always leave this as zero. */
588 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
590 spin_unlock_irqrestore(&tp->indirect_lock, flags);
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
603 spin_lock_irqsave(&tp->indirect_lock, flags);
604 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
608 /* Always leave this as zero. */
609 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 *val = tr32(TG3PCI_MEM_WIN_DATA);
614 /* Always leave this as zero. */
615 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
617 spin_unlock_irqrestore(&tp->indirect_lock, flags);
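/* Editorial sketch: tg3_write_mem()/tg3_read_mem() give word access to NIC
 * SRAM through the PCI memory window (or through config space on
 * SRAM_USE_CONFIG parts).  A typical firmware mailbox exchange elsewhere in
 * this file looks roughly like:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * (both constants appear in the mailbox code later in this file; "val" is a
 * local u32 in the caller).
 */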
620 static void tg3_ape_lock_init(struct tg3 *tp)
625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 regbase = TG3_APE_LOCK_GRANT;
628 regbase = TG3_APE_PER_LOCK_GRANT;
630 /* Make sure the driver isn't holding any stale locks. */
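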
631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
633 case TG3_APE_LOCK_PHY0:
634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
641 bit = APE_LOCK_GRANT_DRIVER;
643 bit = 1 << tp->pci_fn;
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
654 u32 status, req, gnt, bit;
656 if (!tg3_flag(tp, ENABLE_APE))
660 case TG3_APE_LOCK_GPIO:
661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 case TG3_APE_LOCK_GRC:
664 case TG3_APE_LOCK_MEM:
666 bit = APE_LOCK_REQ_DRIVER;
668 bit = 1 << tp->pci_fn;
674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 req = TG3_APE_LOCK_REQ;
676 gnt = TG3_APE_LOCK_GRANT;
678 req = TG3_APE_PER_LOCK_REQ;
679 gnt = TG3_APE_PER_LOCK_GRANT;
684 tg3_ape_write32(tp, req + off, bit);
686 /* Wait for up to 1 millisecond to acquire lock. */
687 for (i = 0; i < 100; i++) {
688 status = tg3_ape_read32(tp, gnt + off);
695 /* Revoke the lock request. */
696 tg3_ape_write32(tp, gnt + off, bit);
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
707 if (!tg3_flag(tp, ENABLE_APE))
711 case TG3_APE_LOCK_GPIO:
712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
714 case TG3_APE_LOCK_GRC:
715 case TG3_APE_LOCK_MEM:
717 bit = APE_LOCK_GRANT_DRIVER;
719 bit = 1 << tp->pci_fn;
725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 gnt = TG3_APE_LOCK_GRANT;
728 gnt = TG3_APE_PER_LOCK_GRANT;
730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
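/* Editorial sketch of the lock/unlock pairing above, as used by the APE
 * event code later in this file: the caller takes the shared-memory lock,
 * touches APE registers, and releases it again.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;			// could not acquire within ~1 ms
 *	... read/modify TG3_APE_* registers ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */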
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
738 /* NCSI does not support APE events */
739 if (tg3_flag(tp, APE_HAS_NCSI))
742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 if (apedata != APE_SEG_SIG_MAGIC)
746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 if (!(apedata & APE_FW_STATUS_READY))
750 /* Wait for up to 1 millisecond for APE to service previous event. */
751 for (i = 0; i < 10; i++) {
752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 event | APE_EVENT_STATUS_EVENT_PENDING);
761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
778 if (!tg3_flag(tp, ENABLE_APE))
782 case RESET_KIND_INIT:
783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 APE_HOST_SEG_SIG_MAGIC);
785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 APE_HOST_SEG_LEN_MAGIC);
787 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 APE_HOST_BEHAV_NO_PHYLOCK);
793 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 TG3_APE_HOST_DRVR_STATE_START);
796 event = APE_EVENT_STATUS_STATE_START;
798 case RESET_KIND_SHUTDOWN:
799 /* With the interface we are currently using,
800 * APE does not track driver state. Wiping
801 * out the HOST SEGMENT SIGNATURE forces
802 * the APE to assume OS absent status.
804 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
806 if (device_may_wakeup(&tp->pdev->dev) &&
807 tg3_flag(tp, WOL_ENABLE)) {
808 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 TG3_APE_HOST_WOL_SPEED_AUTO);
810 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
812 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
814 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
816 event = APE_EVENT_STATUS_STATE_UNLOAD;
818 case RESET_KIND_SUSPEND:
819 event = APE_EVENT_STATUS_STATE_SUSPEND;
825 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
827 tg3_ape_send_event(tp, event);
830 static void tg3_disable_ints(struct tg3 *tp)
834 tw32(TG3PCI_MISC_HOST_CTRL,
835 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 for (i = 0; i < tp->irq_max; i++)
837 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
840 static void tg3_enable_ints(struct tg3 *tp)
847 tw32(TG3PCI_MISC_HOST_CTRL,
848 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
850 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 for (i = 0; i < tp->irq_cnt; i++) {
852 struct tg3_napi *tnapi = &tp->napi[i];
854 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 if (tg3_flag(tp, 1SHOT_MSI))
856 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
858 tp->coal_now |= tnapi->coal_now;
861 /* Force an initial interrupt */
862 if (!tg3_flag(tp, TAGGED_STATUS) &&
863 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
866 tw32(HOSTCC_MODE, tp->coal_now);
868 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
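/* Editorial note: tg3_disable_ints() masks the PCI interrupt in
 * MISC_HOST_CTRL and writes 1 to every interrupt mailbox, while
 * tg3_enable_ints() undoes both and re-arms each vector with its last
 * status tag.  The usual pattern is to bracket a reconfiguration with the
 * pair, e.g. (sketch only):
 *
 *	tg3_disable_ints(tp);
 *	... reprogram the chip ...
 *	tg3_enable_ints(tp);
 */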
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
873 struct tg3 *tp = tnapi->tp;
874 struct tg3_hw_status *sblk = tnapi->hw_status;
875 unsigned int work_exists = 0;
877 /* check for phy events */
878 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 if (sblk->status & SD_STATUS_LINK_CHG)
882 /* check for RX/TX work to do */
883 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
891 * similar to tg3_enable_ints, but it accurately determines whether there
892 * is new work pending and can return without flushing the PIO write
893 * which reenables interrupts
895 static void tg3_int_reenable(struct tg3_napi *tnapi)
897 struct tg3 *tp = tnapi->tp;
899 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
902 /* When doing tagged status, this work check is unnecessary.
903 * The last_tag we write above tells the chip which piece of
904 * work we've completed.
906 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
907 tw32(HOSTCC_MODE, tp->coalesce_mode |
908 HOSTCC_MODE_ENABLE | tnapi->coal_now);
911 static void tg3_switch_clocks(struct tg3 *tp)
916 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
919 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
921 orig_clock_ctrl = clock_ctrl;
922 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
923 CLOCK_CTRL_CLKRUN_OENABLE |
925 tp->pci_clock_ctrl = clock_ctrl;
927 if (tg3_flag(tp, 5705_PLUS)) {
928 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
929 tw32_wait_f(TG3PCI_CLOCK_CTRL,
930 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
932 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
933 tw32_wait_f(TG3PCI_CLOCK_CTRL,
935 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
937 tw32_wait_f(TG3PCI_CLOCK_CTRL,
938 clock_ctrl | (CLOCK_CTRL_ALTCLK),
941 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
944 #define PHY_BUSY_LOOPS 5000
946 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
952 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
954 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
960 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
961 MI_COM_PHY_ADDR_MASK);
962 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
963 MI_COM_REG_ADDR_MASK);
964 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
966 tw32_f(MAC_MI_COM, frame_val);
968 loops = PHY_BUSY_LOOPS;
971 frame_val = tr32(MAC_MI_COM);
973 if ((frame_val & MI_COM_BUSY) == 0) {
975 frame_val = tr32(MAC_MI_COM);
983 *val = frame_val & MI_COM_DATA_MASK;
987 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
988 tw32_f(MAC_MI_MODE, tp->mi_mode);
995 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1001 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1002 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1005 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1007 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1011 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1012 MI_COM_PHY_ADDR_MASK);
1013 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1014 MI_COM_REG_ADDR_MASK);
1015 frame_val |= (val & MI_COM_DATA_MASK);
1016 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1018 tw32_f(MAC_MI_COM, frame_val);
1020 loops = PHY_BUSY_LOOPS;
1021 while (loops != 0) {
1023 frame_val = tr32(MAC_MI_COM);
1024 if ((frame_val & MI_COM_BUSY) == 0) {
1026 frame_val = tr32(MAC_MI_COM);
1036 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1037 tw32_f(MAC_MI_MODE, tp->mi_mode);
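/* Editorial sketch: tg3_readphy()/tg3_writephy() drive one MII management
 * frame through MAC_MI_COM and poll MI_COM_BUSY until the PHY answers; a
 * return value of 0 means success.  A caller reading the standard PHY
 * identifier registers would do, roughly:
 *
 *	u32 id1, id2;
 *	if (!tg3_readphy(tp, MII_PHYSID1, &id1) &&
 *	    !tg3_readphy(tp, MII_PHYSID2, &id2))
 *		// id1/id2 now hold the OUI/model bits
 *
 * MII_PHYSID1/MII_PHYSID2 come from <linux/mii.h>, already included above.
 */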
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1048 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1052 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1056 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1061 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1071 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1075 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1079 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1084 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1094 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1096 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1105 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1107 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1116 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118 MII_TG3_AUXCTL_SHDWSEL_MISC);
1120 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1127 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128 set |= MII_TG3_AUXCTL_MISC_WREN;
1130 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1133 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1134 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1135 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1136 MII_TG3_AUXCTL_ACTL_TX_6DB)
1138 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1139 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140 MII_TG3_AUXCTL_ACTL_TX_6DB);
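/* Editorial note: the two macros above bracket sequences of DSP register
 * writes; the usual shape, seen several times later in this file, is
 * (sketch):
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, reg, val);
 *		...
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 *
 * i.e. the DSP window is only touched when enabling shadow access
 * succeeded, and is always closed again afterwards.
 */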
1142 static int tg3_bmcr_reset(struct tg3 *tp)
1147 /* OK, reset it, and poll the BMCR_RESET bit until it
1148 * clears or we time out.
1150 phy_control = BMCR_RESET;
1151 err = tg3_writephy(tp, MII_BMCR, phy_control);
1157 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1161 if ((phy_control & BMCR_RESET) == 0) {
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1175 struct tg3 *tp = bp->priv;
1178 spin_lock_bh(&tp->lock);
1180 if (tg3_readphy(tp, reg, &val))
1183 spin_unlock_bh(&tp->lock);
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1190 struct tg3 *tp = bp->priv;
1193 spin_lock_bh(&tp->lock);
1195 if (tg3_writephy(tp, reg, val))
1198 spin_unlock_bh(&tp->lock);
1203 static int tg3_mdio_reset(struct mii_bus *bp)
1208 static void tg3_mdio_config_5785(struct tg3 *tp)
1211 struct phy_device *phydev;
1213 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1214 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1215 case PHY_ID_BCM50610:
1216 case PHY_ID_BCM50610M:
1217 val = MAC_PHYCFG2_50610_LED_MODES;
1219 case PHY_ID_BCMAC131:
1220 val = MAC_PHYCFG2_AC131_LED_MODES;
1222 case PHY_ID_RTL8211C:
1223 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1225 case PHY_ID_RTL8201E:
1226 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1232 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1233 tw32(MAC_PHYCFG2, val);
1235 val = tr32(MAC_PHYCFG1);
1236 val &= ~(MAC_PHYCFG1_RGMII_INT |
1237 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1238 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1239 tw32(MAC_PHYCFG1, val);
1244 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1245 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1246 MAC_PHYCFG2_FMODE_MASK_MASK |
1247 MAC_PHYCFG2_GMODE_MASK_MASK |
1248 MAC_PHYCFG2_ACT_MASK_MASK |
1249 MAC_PHYCFG2_QUAL_MASK_MASK |
1250 MAC_PHYCFG2_INBAND_ENABLE;
1252 tw32(MAC_PHYCFG2, val);
1254 val = tr32(MAC_PHYCFG1);
1255 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1256 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1257 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1258 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1259 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1260 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1261 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1263 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1264 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1265 tw32(MAC_PHYCFG1, val);
1267 val = tr32(MAC_EXT_RGMII_MODE);
1268 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1269 MAC_RGMII_MODE_RX_QUALITY |
1270 MAC_RGMII_MODE_RX_ACTIVITY |
1271 MAC_RGMII_MODE_RX_ENG_DET |
1272 MAC_RGMII_MODE_TX_ENABLE |
1273 MAC_RGMII_MODE_TX_LOWPWR |
1274 MAC_RGMII_MODE_TX_RESET);
1275 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1276 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1277 val |= MAC_RGMII_MODE_RX_INT_B |
1278 MAC_RGMII_MODE_RX_QUALITY |
1279 MAC_RGMII_MODE_RX_ACTIVITY |
1280 MAC_RGMII_MODE_RX_ENG_DET;
1281 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1282 val |= MAC_RGMII_MODE_TX_ENABLE |
1283 MAC_RGMII_MODE_TX_LOWPWR |
1284 MAC_RGMII_MODE_TX_RESET;
1286 tw32(MAC_EXT_RGMII_MODE, val);
1289 static void tg3_mdio_start(struct tg3 *tp)
1291 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1292 tw32_f(MAC_MI_MODE, tp->mi_mode);
1295 if (tg3_flag(tp, MDIOBUS_INITED) &&
1296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297 tg3_mdio_config_5785(tp);
1300 static int tg3_mdio_init(struct tg3 *tp)
1304 struct phy_device *phydev;
1306 if (tg3_flag(tp, 5717_PLUS)) {
1309 tp->phy_addr = tp->pci_fn + 1;
1311 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1312 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1314 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1315 TG3_CPMU_PHY_STRAP_IS_SERDES;
1319 tp->phy_addr = TG3_PHY_MII_ADDR;
1323 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1326 tp->mdio_bus = mdiobus_alloc();
1327 if (tp->mdio_bus == NULL)
1330 tp->mdio_bus->name = "tg3 mdio bus";
1331 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1332 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1333 tp->mdio_bus->priv = tp;
1334 tp->mdio_bus->parent = &tp->pdev->dev;
1335 tp->mdio_bus->read = &tg3_mdio_read;
1336 tp->mdio_bus->write = &tg3_mdio_write;
1337 tp->mdio_bus->reset = &tg3_mdio_reset;
1338 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1339 tp->mdio_bus->irq = &tp->mdio_irq[0];
1341 for (i = 0; i < PHY_MAX_ADDR; i++)
1342 tp->mdio_bus->irq[i] = PHY_POLL;
1344 /* The bus registration will look for all the PHYs on the mdio bus.
1345 * Unfortunately, it does not ensure the PHY is powered up before
1346 * accessing the PHY ID registers. A chip reset is the
1347 * quickest way to bring the device back to an operational state.
1349 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1352 i = mdiobus_register(tp->mdio_bus);
1354 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355 mdiobus_free(tp->mdio_bus);
1359 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1361 if (!phydev || !phydev->drv) {
1362 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363 mdiobus_unregister(tp->mdio_bus);
1364 mdiobus_free(tp->mdio_bus);
1368 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369 case PHY_ID_BCM57780:
1370 phydev->interface = PHY_INTERFACE_MODE_GMII;
1371 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1373 case PHY_ID_BCM50610:
1374 case PHY_ID_BCM50610M:
1375 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376 PHY_BRCM_RX_REFCLK_UNUSED |
1377 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1386 case PHY_ID_RTL8211C:
1387 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1389 case PHY_ID_RTL8201E:
1390 case PHY_ID_BCMAC131:
1391 phydev->interface = PHY_INTERFACE_MODE_MII;
1392 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1397 tg3_flag_set(tp, MDIOBUS_INITED);
1399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400 tg3_mdio_config_5785(tp);
1405 static void tg3_mdio_fini(struct tg3 *tp)
1407 if (tg3_flag(tp, MDIOBUS_INITED)) {
1408 tg3_flag_clear(tp, MDIOBUS_INITED);
1409 mdiobus_unregister(tp->mdio_bus);
1410 mdiobus_free(tp->mdio_bus);
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1419 val = tr32(GRC_RX_CPU_EVENT);
1420 val |= GRC_RX_CPU_DRIVER_EVENT;
1421 tw32_f(GRC_RX_CPU_EVENT, val);
1423 tp->last_event_jiffies = jiffies;
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1432 unsigned int delay_cnt;
1435 /* If enough time has passed, no wait is necessary. */
1436 time_remain = (long)(tp->last_event_jiffies + 1 +
1437 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1439 if (time_remain < 0)
1442 /* Check if we can shorten the wait time. */
1443 delay_cnt = jiffies_to_usecs(time_remain);
1444 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446 delay_cnt = (delay_cnt >> 3) + 1;
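/* Worked example (editorial): with the full TG3_FW_EVENT_TIMEOUT_USEC of
 * 2500 usec still outstanding, delay_cnt becomes (2500 >> 3) + 1 = 313
 * iterations, so the polling loop below covers the remaining window in
 * roughly 8 usec steps while bailing out early once the firmware clears
 * GRC_RX_CPU_DRIVER_EVENT.
 */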
1448 for (i = 0; i < delay_cnt; i++) {
1449 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1455 /* tp->lock is held. */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1461 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1464 tg3_wait_for_event_ack(tp);
1466 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1468 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1471 if (!tg3_readphy(tp, MII_BMCR, &reg))
1473 if (!tg3_readphy(tp, MII_BMSR, &reg))
1474 val |= (reg & 0xffff);
1475 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1478 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1480 if (!tg3_readphy(tp, MII_LPA, &reg))
1481 val |= (reg & 0xffff);
1482 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1485 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1488 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489 val |= (reg & 0xffff);
1491 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1493 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1497 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1499 tg3_generate_fw_event(tp);
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1505 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506 /* Wait for RX cpu to ACK the previous event. */
1507 tg3_wait_for_event_ack(tp);
1509 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1511 tg3_generate_fw_event(tp);
1513 /* Wait for RX cpu to ACK this event. */
1514 tg3_wait_for_event_ack(tp);
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1521 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1524 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1526 case RESET_KIND_INIT:
1527 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1531 case RESET_KIND_SHUTDOWN:
1532 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1536 case RESET_KIND_SUSPEND:
1537 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1546 if (kind == RESET_KIND_INIT ||
1547 kind == RESET_KIND_SUSPEND)
1548 tg3_ape_driver_state_change(tp, kind);
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1554 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1556 case RESET_KIND_INIT:
1557 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558 DRV_STATE_START_DONE);
1561 case RESET_KIND_SHUTDOWN:
1562 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563 DRV_STATE_UNLOAD_DONE);
1571 if (kind == RESET_KIND_SHUTDOWN)
1572 tg3_ape_driver_state_change(tp, kind);
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1578 if (tg3_flag(tp, ENABLE_ASF)) {
1580 case RESET_KIND_INIT:
1581 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1585 case RESET_KIND_SHUTDOWN:
1586 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1590 case RESET_KIND_SUSPEND:
1591 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1601 static int tg3_poll_fw(struct tg3 *tp)
1606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607 /* Wait up to 20ms for init done. */
1608 for (i = 0; i < 200; i++) {
1609 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1616 /* Wait for firmware initialization to complete. */
1617 for (i = 0; i < 100000; i++) {
1618 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1624 /* Chip might not be fitted with firmware. Some Sun onboard
1625 * parts are configured like that. So don't signal the timeout
1626 * of the above loop as an error, but do report the lack of
1627 * running firmware once.
1629 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630 tg3_flag_set(tp, NO_FWARE_REPORTED);
1632 netdev_info(tp->dev, "No firmware running\n");
1635 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636 /* The 57765 A0 needs a little more
1637 * time to do some important work.
1645 static void tg3_link_report(struct tg3 *tp)
1647 if (!netif_carrier_ok(tp->dev)) {
1648 netif_info(tp, link, tp->dev, "Link is down\n");
1649 tg3_ump_link_report(tp);
1650 } else if (netif_msg_link(tp)) {
1651 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652 (tp->link_config.active_speed == SPEED_1000 ?
1654 (tp->link_config.active_speed == SPEED_100 ?
1656 (tp->link_config.active_duplex == DUPLEX_FULL ?
1659 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1662 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1665 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666 netdev_info(tp->dev, "EEE is %s\n",
1667 tp->setlpicnt ? "enabled" : "disabled");
1669 tg3_ump_link_report(tp);
1673 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1677 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678 miireg = ADVERTISE_PAUSE_CAP;
1679 else if (flow_ctrl & FLOW_CTRL_TX)
1680 miireg = ADVERTISE_PAUSE_ASYM;
1681 else if (flow_ctrl & FLOW_CTRL_RX)
1682 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1693 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694 miireg = ADVERTISE_1000XPAUSE;
1695 else if (flow_ctrl & FLOW_CTRL_TX)
1696 miireg = ADVERTISE_1000XPSE_ASYM;
1697 else if (flow_ctrl & FLOW_CTRL_RX)
1698 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
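/* Editorial summary of the two helpers above: both map the driver's
 * FLOW_CTRL_TX/FLOW_CTRL_RX wishes onto IEEE pause advertisement bits,
 * using the 1000BASE-T bits in one case and the 1000BASE-X ones in the
 * other.  The mapping is the standard one:
 *
 *	TX && RX  ->  PAUSE            (symmetric pause)
 *	TX only   ->  ASYM_PAUSE       (we may send pause frames)
 *	RX only   ->  PAUSE | ASYM_PAUSE
 *	neither   ->  0
 */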
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1709 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712 if (lcladv & ADVERTISE_1000XPAUSE)
1714 if (rmtadv & ADVERTISE_1000XPAUSE)
1721 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1725 u32 old_rx_mode = tp->rx_mode;
1726 u32 old_tx_mode = tp->tx_mode;
1728 if (tg3_flag(tp, USE_PHYLIB))
1729 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1731 autoneg = tp->link_config.autoneg;
1733 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1734 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1735 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1737 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1739 flowctrl = tp->link_config.flowctrl;
1741 tp->link_config.active_flowctrl = flowctrl;
1743 if (flowctrl & FLOW_CTRL_RX)
1744 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1746 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1748 if (old_rx_mode != tp->rx_mode)
1749 tw32_f(MAC_RX_MODE, tp->rx_mode);
1751 if (flowctrl & FLOW_CTRL_TX)
1752 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1754 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1756 if (old_tx_mode != tp->tx_mode)
1757 tw32_f(MAC_TX_MODE, tp->tx_mode);
1760 static void tg3_adjust_link(struct net_device *dev)
1762 u8 oldflowctrl, linkmesg = 0;
1763 u32 mac_mode, lcl_adv, rmt_adv;
1764 struct tg3 *tp = netdev_priv(dev);
1765 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1767 spin_lock_bh(&tp->lock);
1769 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1770 MAC_MODE_HALF_DUPLEX);
1772 oldflowctrl = tp->link_config.active_flowctrl;
1778 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1779 mac_mode |= MAC_MODE_PORT_MODE_MII;
1780 else if (phydev->speed == SPEED_1000 ||
1781 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1782 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1784 mac_mode |= MAC_MODE_PORT_MODE_MII;
1786 if (phydev->duplex == DUPLEX_HALF)
1787 mac_mode |= MAC_MODE_HALF_DUPLEX;
1789 lcl_adv = tg3_advert_flowctrl_1000T(
1790 tp->link_config.flowctrl);
1793 rmt_adv = LPA_PAUSE_CAP;
1794 if (phydev->asym_pause)
1795 rmt_adv |= LPA_PAUSE_ASYM;
1798 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1800 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1802 if (mac_mode != tp->mac_mode) {
1803 tp->mac_mode = mac_mode;
1804 tw32_f(MAC_MODE, tp->mac_mode);
1808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1809 if (phydev->speed == SPEED_10)
1811 MAC_MI_STAT_10MBPS_MODE |
1812 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1814 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1817 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1818 tw32(MAC_TX_LENGTHS,
1819 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820 (6 << TX_LENGTHS_IPG_SHIFT) |
1821 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1823 tw32(MAC_TX_LENGTHS,
1824 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1825 (6 << TX_LENGTHS_IPG_SHIFT) |
1826 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1828 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1829 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1830 phydev->speed != tp->link_config.active_speed ||
1831 phydev->duplex != tp->link_config.active_duplex ||
1832 oldflowctrl != tp->link_config.active_flowctrl)
1835 tp->link_config.active_speed = phydev->speed;
1836 tp->link_config.active_duplex = phydev->duplex;
1838 spin_unlock_bh(&tp->lock);
1841 tg3_link_report(tp);
1844 static int tg3_phy_init(struct tg3 *tp)
1846 struct phy_device *phydev;
1848 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1851 /* Bring the PHY back to a known state. */
1854 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1856 /* Attach the MAC to the PHY. */
1857 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1858 phydev->dev_flags, phydev->interface);
1859 if (IS_ERR(phydev)) {
1860 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1861 return PTR_ERR(phydev);
1864 /* Mask with MAC supported features. */
1865 switch (phydev->interface) {
1866 case PHY_INTERFACE_MODE_GMII:
1867 case PHY_INTERFACE_MODE_RGMII:
1868 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1869 phydev->supported &= (PHY_GBIT_FEATURES |
1871 SUPPORTED_Asym_Pause);
1875 case PHY_INTERFACE_MODE_MII:
1876 phydev->supported &= (PHY_BASIC_FEATURES |
1878 SUPPORTED_Asym_Pause);
1881 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1885 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1887 phydev->advertising = phydev->supported;
1892 static void tg3_phy_start(struct tg3 *tp)
1894 struct phy_device *phydev;
1896 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1899 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1901 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1902 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1903 phydev->speed = tp->link_config.orig_speed;
1904 phydev->duplex = tp->link_config.orig_duplex;
1905 phydev->autoneg = tp->link_config.orig_autoneg;
1906 phydev->advertising = tp->link_config.orig_advertising;
1911 phy_start_aneg(phydev);
1914 static void tg3_phy_stop(struct tg3 *tp)
1916 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1919 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1922 static void tg3_phy_fini(struct tg3 *tp)
1924 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1930 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1935 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1938 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1939 /* Cannot do read-modify-write on 5401 */
1940 err = tg3_phy_auxctl_write(tp,
1941 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1942 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1947 err = tg3_phy_auxctl_read(tp,
1948 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1952 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1953 err = tg3_phy_auxctl_write(tp,
1954 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1964 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1967 tg3_writephy(tp, MII_TG3_FET_TEST,
1968 phytest | MII_TG3_FET_SHADOW_EN);
1969 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1971 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1976 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1980 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1984 if (!tg3_flag(tp, 5705_PLUS) ||
1985 (tg3_flag(tp, 5717_PLUS) &&
1986 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1989 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1990 tg3_phy_fet_toggle_apd(tp, enable);
1994 reg = MII_TG3_MISC_SHDW_WREN |
1995 MII_TG3_MISC_SHDW_SCR5_SEL |
1996 MII_TG3_MISC_SHDW_SCR5_LPED |
1997 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1998 MII_TG3_MISC_SHDW_SCR5_SDTL |
1999 MII_TG3_MISC_SHDW_SCR5_C125OE;
2000 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2001 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2003 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2006 reg = MII_TG3_MISC_SHDW_WREN |
2007 MII_TG3_MISC_SHDW_APD_SEL |
2008 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2010 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2012 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2015 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2019 if (!tg3_flag(tp, 5705_PLUS) ||
2020 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2023 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2026 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2027 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2029 tg3_writephy(tp, MII_TG3_FET_TEST,
2030 ephy | MII_TG3_FET_SHADOW_EN);
2031 if (!tg3_readphy(tp, reg, &phy)) {
2033 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2036 tg3_writephy(tp, reg, phy);
2038 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2043 ret = tg3_phy_auxctl_read(tp,
2044 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2047 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2050 tg3_phy_auxctl_write(tp,
2051 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2061 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2064 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2066 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2070 static void tg3_phy_apply_otp(struct tg3 *tp)
2079 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2082 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2083 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2084 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2086 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2087 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2088 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2090 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2091 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2092 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2094 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2095 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2097 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2098 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2100 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2101 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2102 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2104 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2107 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2111 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2116 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2117 current_link_up == 1 &&
2118 tp->link_config.active_duplex == DUPLEX_FULL &&
2119 (tp->link_config.active_speed == SPEED_100 ||
2120 tp->link_config.active_speed == SPEED_1000)) {
2123 if (tp->link_config.active_speed == SPEED_1000)
2124 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2126 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2128 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2130 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2131 TG3_CL45_D7_EEERES_STAT, &val);
2133 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2134 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2138 if (!tp->setlpicnt) {
2139 if (current_link_up == 1 &&
2140 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2141 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2142 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2145 val = tr32(TG3_CPMU_EEE_MODE);
2146 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2150 static void tg3_phy_eee_enable(struct tg3 *tp)
2154 if (tp->link_config.active_speed == SPEED_1000 &&
2155 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2158 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159 val = MII_TG3_DSP_TAP26_ALNOKO |
2160 MII_TG3_DSP_TAP26_RMRXSTO;
2161 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2162 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2165 val = tr32(TG3_CPMU_EEE_MODE);
2166 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2176 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177 if ((tmp32 & 0x1000) == 0)
2187 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2189 static const u32 test_pat[4][6] = {
2190 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2191 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2192 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2193 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2197 for (chan = 0; chan < 4; chan++) {
2200 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2201 (chan * 0x2000) | 0x0200);
2202 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2204 for (i = 0; i < 6; i++)
2205 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2208 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2209 if (tg3_wait_macro_done(tp)) {
2214 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2215 (chan * 0x2000) | 0x0200);
2216 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2217 if (tg3_wait_macro_done(tp)) {
2222 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2223 if (tg3_wait_macro_done(tp)) {
2228 for (i = 0; i < 6; i += 2) {
2231 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2232 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2233 tg3_wait_macro_done(tp)) {
2239 if (low != test_pat[chan][i] ||
2240 high != test_pat[chan][i+1]) {
2241 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2242 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2243 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2253 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2257 for (chan = 0; chan < 4; chan++) {
2260 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2261 (chan * 0x2000) | 0x0200);
2262 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2263 for (i = 0; i < 6; i++)
2264 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2265 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2266 if (tg3_wait_macro_done(tp))
2273 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2275 u32 reg32, phy9_orig;
2276 int retries, do_phy_reset, err;
2282 err = tg3_bmcr_reset(tp);
2288 /* Disable transmitter and interrupt. */
2289 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2293 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2295 /* Set full-duplex, 1000 mbps. */
2296 tg3_writephy(tp, MII_BMCR,
2297 BMCR_FULLDPLX | BMCR_SPEED1000);
2299 /* Set to master mode. */
2300 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2303 tg3_writephy(tp, MII_CTRL1000,
2304 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2306 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2310 /* Block the PHY control access. */
2311 tg3_phydsp_write(tp, 0x8005, 0x0800);
2313 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2316 } while (--retries);
2318 err = tg3_phy_reset_chanpat(tp);
2322 tg3_phydsp_write(tp, 0x8005, 0x0000);
2324 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2325 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2327 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2329 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2331 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2333 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340 /* This will reset the tigon3 PHY if there is no valid
2341 * link unless the FORCE argument is non-zero.
2343 static int tg3_phy_reset(struct tg3 *tp)
2348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2349 val = tr32(GRC_MISC_CFG);
2350 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2353 err = tg3_readphy(tp, MII_BMSR, &val);
2354 err |= tg3_readphy(tp, MII_BMSR, &val);
2358 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2359 netif_carrier_off(tp->dev);
2360 tg3_link_report(tp);
2363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2366 err = tg3_phy_reset_5703_4_5(tp);
2373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2374 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2375 cpmuctrl = tr32(TG3_CPMU_CTRL);
2376 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2377 tw32(TG3_CPMU_CTRL,
2378 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2381 err = tg3_bmcr_reset(tp);
2385 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2386 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2387 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2389 tw32(TG3_CPMU_CTRL, cpmuctrl);
2392 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2393 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2394 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2395 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2396 CPMU_LSPD_1000MB_MACCLK_12_5) {
2397 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2399 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2403 if (tg3_flag(tp, 5717_PLUS) &&
2404 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2407 tg3_phy_apply_otp(tp);
2409 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2410 tg3_phy_toggle_apd(tp, true);
2412 tg3_phy_toggle_apd(tp, false);
2415 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2416 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2417 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2418 tg3_phydsp_write(tp, 0x000a, 0x0323);
2419 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2422 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2427 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2428 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2429 tg3_phydsp_write(tp, 0x000a, 0x310b);
2430 tg3_phydsp_write(tp, 0x201f, 0x9506);
2431 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2432 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2434 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2435 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2436 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2437 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2438 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2439 tg3_writephy(tp, MII_TG3_TEST1,
2440 MII_TG3_TEST1_TRIM_EN | 0x4);
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2444 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2448 /* Set Extended packet length bit (bit 14) on all chips that
2449 * support jumbo frames. */
2450 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2451 /* Cannot do read-modify-write on 5401 */
2452 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2453 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2454 /* Set bit 14 with read-modify-write to preserve other bits */
2455 err = tg3_phy_auxctl_read(tp,
2456 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2457 if (!err)
2458 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2459 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2462 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2463 * jumbo frames transmission. */
2465 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2466 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2467 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2468 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2472 /* adjust output voltage */
2473 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2476 tg3_phy_toggle_automdix(tp, 1);
2477 tg3_phy_set_wirespeed(tp);
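/* Per-function power-source messages, exchanged through the APE
 * GPIO_MSG scratchpad on 5717/5719 or the CPMU driver-status
 * register elsewhere.
 */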
2481 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2483 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2484 TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489 (TG3_GPIO_MSG_DRVR_PRES << 12))
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495 (TG3_GPIO_MSG_NEED_VAUX << 12))
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2505 status = tr32(TG3_CPMU_DRV_STATUS);
2507 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508 status &= ~(TG3_GPIO_MSG_MASK << shift);
2509 status |= (newstat << shift);
2511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2515 tw32(TG3_CPMU_DRV_STATUS, status);
2517 return status >> TG3_APE_GPIO_MSG_SHIFT;
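/* Advertise this function as present and switch the board back to
 * Vmain power, serializing through the APE GPIO lock on
 * 5717/5719/5720 devices.
 */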
2520 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2522 if (!tg3_flag(tp, IS_NIC))
2525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2528 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2531 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2533 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534 TG3_GRC_LCLCTL_PWRSW_DELAY);
2536 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2538 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2539 TG3_GRC_LCLCTL_PWRSW_DELAY);
2545 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2549 if (!tg3_flag(tp, IS_NIC) ||
2550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2554 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2556 tw32_wait_f(GRC_LOCAL_CTRL,
2557 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2558 TG3_GRC_LCLCTL_PWRSW_DELAY);
2560 tw32_wait_f(GRC_LOCAL_CTRL,
2562 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 tw32_wait_f(GRC_LOCAL_CTRL,
2565 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2569 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2571 if (!tg3_flag(tp, IS_NIC))
2574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2576 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2577 (GRC_LCLCTRL_GPIO_OE0 |
2578 GRC_LCLCTRL_GPIO_OE1 |
2579 GRC_LCLCTRL_GPIO_OE2 |
2580 GRC_LCLCTRL_GPIO_OUTPUT0 |
2581 GRC_LCLCTRL_GPIO_OUTPUT1),
2582 TG3_GRC_LCLCTL_PWRSW_DELAY);
2583 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2585 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2586 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2587 GRC_LCLCTRL_GPIO_OE1 |
2588 GRC_LCLCTRL_GPIO_OE2 |
2589 GRC_LCLCTRL_GPIO_OUTPUT0 |
2590 GRC_LCLCTRL_GPIO_OUTPUT1 |
2592 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2593 TG3_GRC_LCLCTL_PWRSW_DELAY);
2595 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2596 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2600 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 TG3_GRC_LCLCTL_PWRSW_DELAY);
2604 u32 grc_local_ctrl = 0;
2606 /* Workaround to prevent overdrawing Amps. */
2607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2608 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2609 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2611 TG3_GRC_LCLCTL_PWRSW_DELAY);
2614 /* On 5753 and variants, GPIO2 cannot be used. */
2615 no_gpio2 = tp->nic_sram_data_cfg &
2616 NIC_SRAM_DATA_CFG_NO_GPIO2;
2618 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2619 GRC_LCLCTRL_GPIO_OE1 |
2620 GRC_LCLCTRL_GPIO_OE2 |
2621 GRC_LCLCTRL_GPIO_OUTPUT1 |
2622 GRC_LCLCTRL_GPIO_OUTPUT2;
2624 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2625 GRC_LCLCTRL_GPIO_OUTPUT2);
2627 tw32_wait_f(GRC_LOCAL_CTRL,
2628 tp->grc_local_ctrl | grc_local_ctrl,
2629 TG3_GRC_LCLCTL_PWRSW_DELAY);
2631 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2633 tw32_wait_f(GRC_LOCAL_CTRL,
2634 tp->grc_local_ctrl | grc_local_ctrl,
2635 TG3_GRC_LCLCTL_PWRSW_DELAY);
2638 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2639 tw32_wait_f(GRC_LOCAL_CTRL,
2640 tp->grc_local_ctrl | grc_local_ctrl,
2641 TG3_GRC_LCLCTL_PWRSW_DELAY);
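/* Vaux/Vmain arbitration for 5717-class parts: each PCI function
 * records whether it needs Vaux (ASF, APE or WoL active), and the
 * aggregate vote decides which power source the board keeps.
 */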
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2650 /* Serialize power state transitions */
2651 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2654 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655 msg = TG3_GPIO_MSG_NEED_VAUX;
2657 msg = tg3_set_function_status(tp, msg);
2659 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2662 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663 tg3_pwrsrc_switch_to_vaux(tp);
2665 tg3_pwrsrc_die_with_vmain(tp);
2668 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2671 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2673 bool need_vaux = false;
2675 /* The GPIOs do something completely different on 57765. */
2676 if (!tg3_flag(tp, IS_NIC) ||
2677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2683 tg3_frob_aux_power_5717(tp, include_wol ?
2684 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2688 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2689 struct net_device *dev_peer;
2691 dev_peer = pci_get_drvdata(tp->pdev_peer);
2693 /* remove_one() may have been run on the peer. */
2695 struct tg3 *tp_peer = netdev_priv(dev_peer);
2697 if (tg3_flag(tp_peer, INIT_COMPLETE))
2700 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2701 tg3_flag(tp_peer, ENABLE_ASF))
2706 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2707 tg3_flag(tp, ENABLE_ASF))
2711 tg3_pwrsrc_switch_to_vaux(tp);
2713 tg3_pwrsrc_die_with_vmain(tp);
2716 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2718 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2720 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2721 if (speed != SPEED_10)
2723 } else if (speed == SPEED_10)
2729 static int tg3_setup_phy(struct tg3 *, int);
2730 static int tg3_halt_cpu(struct tg3 *, u32);
2732 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2736 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2738 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2739 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2742 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2743 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2744 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2749 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2751 val = tr32(GRC_MISC_CFG);
2752 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2755 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2757 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2760 tg3_writephy(tp, MII_ADVERTISE, 0);
2761 tg3_writephy(tp, MII_BMCR,
2762 BMCR_ANENABLE | BMCR_ANRESTART);
2764 tg3_writephy(tp, MII_TG3_FET_TEST,
2765 phytest | MII_TG3_FET_SHADOW_EN);
2766 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2767 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2769 MII_TG3_FET_SHDW_AUXMODE4,
2772 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2775 } else if (do_low_power) {
2776 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2777 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2779 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2780 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2781 MII_TG3_AUXCTL_PCTL_VREG_11V;
2782 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2785 /* The PHY should not be powered down on some chips because
2788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2790 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2791 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2794 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2795 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2796 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2797 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2798 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2799 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2802 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2805 /* tp->lock is held. */
2806 static int tg3_nvram_lock(struct tg3 *tp)
2808 if (tg3_flag(tp, NVRAM)) {
2811 if (tp->nvram_lock_cnt == 0) {
2812 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2813 for (i = 0; i < 8000; i++) {
2814 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2819 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2823 tp->nvram_lock_cnt++;
2828 /* tp->lock is held. */
2829 static void tg3_nvram_unlock(struct tg3 *tp)
2831 if (tg3_flag(tp, NVRAM)) {
2832 if (tp->nvram_lock_cnt > 0)
2833 tp->nvram_lock_cnt--;
2834 if (tp->nvram_lock_cnt == 0)
2835 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2839 /* tp->lock is held. */
2840 static void tg3_enable_nvram_access(struct tg3 *tp)
2842 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843 u32 nvaccess = tr32(NVRAM_ACCESS);
2845 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2849 /* tp->lock is held. */
2850 static void tg3_disable_nvram_access(struct tg3 *tp)
2852 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2853 u32 nvaccess = tr32(NVRAM_ACCESS);
2855 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2859 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2860 u32 offset, u32 *val)
2865 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2868 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2869 EEPROM_ADDR_DEVID_MASK |
2871 tw32(GRC_EEPROM_ADDR,
2873 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2874 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2875 EEPROM_ADDR_ADDR_MASK) |
2876 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2878 for (i = 0; i < 1000; i++) {
2879 tmp = tr32(GRC_EEPROM_ADDR);
2881 if (tmp & EEPROM_ADDR_COMPLETE)
2885 if (!(tmp & EEPROM_ADDR_COMPLETE))
2888 tmp = tr32(GRC_EEPROM_DATA);
2891 /* The data will always be opposite the native endian
2892 * format. Perform a blind byteswap to compensate. */
2899 #define NVRAM_CMD_TIMEOUT 10000
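/* Kick off an NVRAM command and busy-wait for NVRAM_CMD_DONE, giving
 * up after NVRAM_CMD_TIMEOUT polls.
 */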
2901 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2905 tw32(NVRAM_CMD, nvram_cmd);
2906 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2908 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2914 if (i == NVRAM_CMD_TIMEOUT)
2920 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2922 if (tg3_flag(tp, NVRAM) &&
2923 tg3_flag(tp, NVRAM_BUFFERED) &&
2924 tg3_flag(tp, FLASH) &&
2925 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2926 (tp->nvram_jedecnum == JEDEC_ATMEL))
2928 addr = ((addr / tp->nvram_pagesize) <<
2929 ATMEL_AT45DB0X1B_PAGE_POS) +
2930 (addr % tp->nvram_pagesize);
2935 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2937 if (tg3_flag(tp, NVRAM) &&
2938 tg3_flag(tp, NVRAM_BUFFERED) &&
2939 tg3_flag(tp, FLASH) &&
2940 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2941 (tp->nvram_jedecnum == JEDEC_ATMEL))
2943 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2944 tp->nvram_pagesize) +
2945 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2950 /* NOTE: Data read in from NVRAM is byteswapped according to
2951 * the byteswapping settings for all other register accesses.
2952 * tg3 devices are BE devices, so on a BE machine, the data
2953 * returned will be exactly as it is seen in NVRAM. On a LE
2954 * machine, the 32-bit value will be byteswapped. */
2956 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2960 if (!tg3_flag(tp, NVRAM))
2961 return tg3_nvram_read_using_eeprom(tp, offset, val);
2963 offset = tg3_nvram_phys_addr(tp, offset);
2965 if (offset > NVRAM_ADDR_MSK)
2968 ret = tg3_nvram_lock(tp);
2972 tg3_enable_nvram_access(tp);
2974 tw32(NVRAM_ADDR, offset);
2975 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2976 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2979 *val = tr32(NVRAM_RDDATA);
2981 tg3_disable_nvram_access(tp);
2983 tg3_nvram_unlock(tp);
2988 /* Ensures NVRAM data is in bytestream format. */
2989 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2992 int res = tg3_nvram_read(tp, offset, &v);
2994 *val = cpu_to_be32(v);
2998 #define RX_CPU_SCRATCH_BASE 0x30000
2999 #define RX_CPU_SCRATCH_SIZE 0x04000
3000 #define TX_CPU_SCRATCH_BASE 0x34000
3001 #define TX_CPU_SCRATCH_SIZE 0x04000
3003 /* tp->lock is held. */
3004 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3008 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3011 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3013 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3016 if (offset == RX_CPU_BASE) {
3017 for (i = 0; i < 10000; i++) {
3018 tw32(offset + CPU_STATE, 0xffffffff);
3019 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3020 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3024 tw32(offset + CPU_STATE, 0xffffffff);
3025 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3028 for (i = 0; i < 10000; i++) {
3029 tw32(offset + CPU_STATE, 0xffffffff);
3030 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3031 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3037 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3038 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3042 /* Clear firmware's nvram arbitration. */
3043 if (tg3_flag(tp, NVRAM))
3044 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
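/* Firmware image descriptor: target load address, length in bytes and
 * a pointer to the big-endian instruction words.
 */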
3049 unsigned int fw_base;
3050 unsigned int fw_len;
3051 const __be32 *fw_data;
3054 /* tp->lock is held. */
3055 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3056 u32 cpu_scratch_base, int cpu_scratch_size,
3057 struct fw_info *info)
3059 int err, lock_err, i;
3060 void (*write_op)(struct tg3 *, u32, u32);
3062 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3064 "%s: Trying to load TX cpu firmware which is 5705\n",
3069 if (tg3_flag(tp, 5705_PLUS))
3070 write_op = tg3_write_mem;
3072 write_op = tg3_write_indirect_reg32;
3074 /* It is possible that bootcode is still loading at this point.
3075 * Get the nvram lock first before halting the cpu. */
3077 lock_err = tg3_nvram_lock(tp);
3078 err = tg3_halt_cpu(tp, cpu_base);
3080 tg3_nvram_unlock(tp);
3084 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3085 write_op(tp, cpu_scratch_base + i, 0);
3086 tw32(cpu_base + CPU_STATE, 0xffffffff);
3087 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3088 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3089 write_op(tp, (cpu_scratch_base +
3090 (info->fw_base & 0xffff) +
3092 be32_to_cpu(info->fw_data[i]));
3100 /* tp->lock is held. */
3101 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3103 struct fw_info info;
3104 const __be32 *fw_data;
3107 fw_data = (void *)tp->fw->data;
3109 /* Firmware blob starts with version numbers, followed by
3110 start address and length. We are setting complete length.
3111 length = end_address_of_bss - start_address_of_text.
3112 Remainder is the blob to be loaded contiguously
3113 from start address. */
3115 info.fw_base = be32_to_cpu(fw_data[1]);
3116 info.fw_len = tp->fw->size - 12;
3117 info.fw_data = &fw_data[3];
3119 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3120 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3125 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3126 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3131 /* Now startup only the RX cpu. */
3132 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3135 for (i = 0; i < 5; i++) {
3136 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3138 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3139 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3140 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3144 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3145 "should be %08x\n", __func__,
3146 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3149 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3150 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3155 /* tp->lock is held. */
3156 static int tg3_load_tso_firmware(struct tg3 *tp)
3158 struct fw_info info;
3159 const __be32 *fw_data;
3160 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3163 if (tg3_flag(tp, HW_TSO_1) ||
3164 tg3_flag(tp, HW_TSO_2) ||
3165 tg3_flag(tp, HW_TSO_3))
3168 fw_data = (void *)tp->fw->data;
3170 /* Firmware blob starts with version numbers, followed by
3171 start address and length. We are setting complete length.
3172 length = end_address_of_bss - start_address_of_text.
3173 Remainder is the blob to be loaded contiguously
3174 from start address. */
3176 info.fw_base = be32_to_cpu(fw_data[1]);
3177 cpu_scratch_size = tp->fw_len;
3178 info.fw_len = tp->fw->size - 12;
3179 info.fw_data = &fw_data[3];
3181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3182 cpu_base = RX_CPU_BASE;
3183 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3185 cpu_base = TX_CPU_BASE;
3186 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3187 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3190 err = tg3_load_firmware_cpu(tp, cpu_base,
3191 cpu_scratch_base, cpu_scratch_size,
3196 /* Now startup the cpu. */
3197 tw32(cpu_base + CPU_STATE, 0xffffffff);
3198 tw32_f(cpu_base + CPU_PC, info.fw_base);
3200 for (i = 0; i < 5; i++) {
3201 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3203 tw32(cpu_base + CPU_STATE, 0xffffffff);
3204 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3205 tw32_f(cpu_base + CPU_PC, info.fw_base);
3210 "%s fails to set CPU PC, is %08x should be %08x\n",
3211 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3214 tw32(cpu_base + CPU_STATE, 0xffffffff);
3215 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3220 /* tp->lock is held. */
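/* Program the station address into all four MAC address slots (plus
 * the twelve extended slots on 5703/5704) and reseed the transmit
 * backoff generator from the address bytes.
 */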
3221 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3223 u32 addr_high, addr_low;
3226 addr_high = ((tp->dev->dev_addr[0] << 8) |
3227 tp->dev->dev_addr[1]);
3228 addr_low = ((tp->dev->dev_addr[2] << 24) |
3229 (tp->dev->dev_addr[3] << 16) |
3230 (tp->dev->dev_addr[4] << 8) |
3231 (tp->dev->dev_addr[5] << 0));
3232 for (i = 0; i < 4; i++) {
3233 if (i == 1 && skip_mac_1)
3235 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3236 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3241 for (i = 0; i < 12; i++) {
3242 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3243 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3247 addr_high = (tp->dev->dev_addr[0] +
3248 tp->dev->dev_addr[1] +
3249 tp->dev->dev_addr[2] +
3250 tp->dev->dev_addr[3] +
3251 tp->dev->dev_addr[4] +
3252 tp->dev->dev_addr[5]) &
3253 TX_BACKOFF_SEED_MASK;
3254 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3257 static void tg3_enable_register_access(struct tg3 *tp)
3260 /* Make sure register accesses (indirect or otherwise) will function
3261 * correctly. */
3263 pci_write_config_dword(tp->pdev,
3264 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3267 static int tg3_power_up(struct tg3 *tp)
3271 tg3_enable_register_access(tp);
3273 err = pci_set_power_state(tp->pdev, PCI_D0);
3275 /* Switch out of Vaux if it is a NIC */
3276 tg3_pwrsrc_switch_to_vmain(tp);
3278 netdev_err(tp->dev, "Transition to D0 failed\n");
3284 static int tg3_power_down_prepare(struct tg3 *tp)
3287 bool device_should_wake, do_low_power;
3289 tg3_enable_register_access(tp);
3291 /* Restore the CLKREQ setting. */
3292 if (tg3_flag(tp, CLKREQ_BUG)) {
3295 pci_read_config_word(tp->pdev,
3296 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3298 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3299 pci_write_config_word(tp->pdev,
3300 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3304 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3305 tw32(TG3PCI_MISC_HOST_CTRL,
3306 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3308 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3309 tg3_flag(tp, WOL_ENABLE);
3311 if (tg3_flag(tp, USE_PHYLIB)) {
3312 do_low_power = false;
3313 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3314 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3315 struct phy_device *phydev;
3316 u32 phyid, advertising;
3318 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3320 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3322 tp->link_config.orig_speed = phydev->speed;
3323 tp->link_config.orig_duplex = phydev->duplex;
3324 tp->link_config.orig_autoneg = phydev->autoneg;
3325 tp->link_config.orig_advertising = phydev->advertising;
3327 advertising = ADVERTISED_TP |
3329 ADVERTISED_Autoneg |
3330 ADVERTISED_10baseT_Half;
3332 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3333 if (tg3_flag(tp, WOL_SPEED_100MB))
3335 ADVERTISED_100baseT_Half |
3336 ADVERTISED_100baseT_Full |
3337 ADVERTISED_10baseT_Full;
3339 advertising |= ADVERTISED_10baseT_Full;
3342 phydev->advertising = advertising;
3344 phy_start_aneg(phydev);
3346 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3347 if (phyid != PHY_ID_BCMAC131) {
3348 phyid &= PHY_BCM_OUI_MASK;
3349 if (phyid == PHY_BCM_OUI_1 ||
3350 phyid == PHY_BCM_OUI_2 ||
3351 phyid == PHY_BCM_OUI_3)
3352 do_low_power = true;
3356 do_low_power = true;
3358 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3359 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3360 tp->link_config.orig_speed = tp->link_config.speed;
3361 tp->link_config.orig_duplex = tp->link_config.duplex;
3362 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3365 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3366 tp->link_config.speed = SPEED_10;
3367 tp->link_config.duplex = DUPLEX_HALF;
3368 tp->link_config.autoneg = AUTONEG_ENABLE;
3369 tg3_setup_phy(tp, 0);
3373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3376 val = tr32(GRC_VCPU_EXT_CTRL);
3377 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3378 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3382 for (i = 0; i < 200; i++) {
3383 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3384 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3389 if (tg3_flag(tp, WOL_CAP))
3390 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3391 WOL_DRV_STATE_SHUTDOWN |
3395 if (device_should_wake) {
3398 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3400 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3401 tg3_phy_auxctl_write(tp,
3402 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3403 MII_TG3_AUXCTL_PCTL_WOL_EN |
3404 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3405 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3409 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3410 mac_mode = MAC_MODE_PORT_MODE_GMII;
3412 mac_mode = MAC_MODE_PORT_MODE_MII;
3414 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3415 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3417 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3418 SPEED_100 : SPEED_10;
3419 if (tg3_5700_link_polarity(tp, speed))
3420 mac_mode |= MAC_MODE_LINK_POLARITY;
3422 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3425 mac_mode = MAC_MODE_PORT_MODE_TBI;
3428 if (!tg3_flag(tp, 5750_PLUS))
3429 tw32(MAC_LED_CTRL, tp->led_ctrl);
3431 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3432 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3433 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3434 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3436 if (tg3_flag(tp, ENABLE_APE))
3437 mac_mode |= MAC_MODE_APE_TX_EN |
3438 MAC_MODE_APE_RX_EN |
3439 MAC_MODE_TDE_ENABLE;
3441 tw32_f(MAC_MODE, mac_mode);
3444 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3448 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3449 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3453 base_val = tp->pci_clock_ctrl;
3454 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3455 CLOCK_CTRL_TXCLK_DISABLE);
3457 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3458 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3459 } else if (tg3_flag(tp, 5780_CLASS) ||
3460 tg3_flag(tp, CPMU_PRESENT) ||
3461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3463 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3464 u32 newbits1, newbits2;
3466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3468 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3469 CLOCK_CTRL_TXCLK_DISABLE |
3471 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3472 } else if (tg3_flag(tp, 5705_PLUS)) {
3473 newbits1 = CLOCK_CTRL_625_CORE;
3474 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3476 newbits1 = CLOCK_CTRL_ALTCLK;
3477 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3480 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3483 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3486 if (!tg3_flag(tp, 5705_PLUS)) {
3489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3491 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3492 CLOCK_CTRL_TXCLK_DISABLE |
3493 CLOCK_CTRL_44MHZ_CORE);
3495 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3498 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3499 tp->pci_clock_ctrl | newbits3, 40);
3503 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3504 tg3_power_down_phy(tp, do_low_power);
3506 tg3_frob_aux_power(tp, true);
3508 /* Workaround for unstable PLL clock */
3509 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3510 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3511 u32 val = tr32(0x7d00);
3513 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3515 if (!tg3_flag(tp, ENABLE_ASF)) {
3518 err = tg3_nvram_lock(tp);
3519 tg3_halt_cpu(tp, RX_CPU_BASE);
3521 tg3_nvram_unlock(tp);
3525 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3530 static void tg3_power_down(struct tg3 *tp)
3532 tg3_power_down_prepare(tp);
3534 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3535 pci_set_power_state(tp->pdev, PCI_D3hot);
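/* Decode the PHY auxiliary status register into link speed and
 * duplex.  FET-style PHYs only encode 10/100; anything unrecognized
 * is reported as invalid.
 */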
3538 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3540 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3541 case MII_TG3_AUX_STAT_10HALF:
3543 *duplex = DUPLEX_HALF;
3546 case MII_TG3_AUX_STAT_10FULL:
3548 *duplex = DUPLEX_FULL;
3551 case MII_TG3_AUX_STAT_100HALF:
3553 *duplex = DUPLEX_HALF;
3556 case MII_TG3_AUX_STAT_100FULL:
3558 *duplex = DUPLEX_FULL;
3561 case MII_TG3_AUX_STAT_1000HALF:
3562 *speed = SPEED_1000;
3563 *duplex = DUPLEX_HALF;
3566 case MII_TG3_AUX_STAT_1000FULL:
3567 *speed = SPEED_1000;
3568 *duplex = DUPLEX_FULL;
3572 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3573 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3574 SPEED_10;
3575 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3576 DUPLEX_HALF;
3579 *speed = SPEED_INVALID;
3580 *duplex = DUPLEX_INVALID;
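/* Load the autoneg advertisement registers: 10/100 and flow control
 * via MII_ADVERTISE, gigabit via MII_CTRL1000, and the EEE abilities
 * through the clause 45 MDIO_AN_EEE_ADV register where supported.
 */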
3585 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3590 new_adv = ADVERTISE_CSMA;
3591 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3592 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3594 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3598 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3601 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3603 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3604 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3605 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3607 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3611 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3614 tw32(TG3_CPMU_EEE_MODE,
3615 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3617 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3622 /* Advertise 100-BaseTX EEE ability */
3623 if (advertise & ADVERTISED_100baseT_Full)
3624 val |= MDIO_AN_EEE_ADV_100TX;
3625 /* Advertise 1000-BaseT EEE ability */
3626 if (advertise & ADVERTISED_1000baseT_Full)
3627 val |= MDIO_AN_EEE_ADV_1000T;
3628 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3632 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3634 case ASIC_REV_57765:
3636 /* If we advertised any EEE abilities above... */
3638 val = MII_TG3_DSP_TAP26_ALNOKO |
3639 MII_TG3_DSP_TAP26_RMRXSTO |
3640 MII_TG3_DSP_TAP26_OPCSINPT;
3641 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3644 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3645 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3646 MII_TG3_DSP_CH34TP2_HIBW01);
3649 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3658 static void tg3_phy_copper_begin(struct tg3 *tp)
3663 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3664 new_adv = ADVERTISED_10baseT_Half |
3665 ADVERTISED_10baseT_Full;
3666 if (tg3_flag(tp, WOL_SPEED_100MB))
3667 new_adv |= ADVERTISED_100baseT_Half |
3668 ADVERTISED_100baseT_Full;
3670 tg3_phy_autoneg_cfg(tp, new_adv,
3671 FLOW_CTRL_TX | FLOW_CTRL_RX);
3672 } else if (tp->link_config.speed == SPEED_INVALID) {
3673 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3674 tp->link_config.advertising &=
3675 ~(ADVERTISED_1000baseT_Half |
3676 ADVERTISED_1000baseT_Full);
3678 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3679 tp->link_config.flowctrl);
3681 /* Asking for a specific link mode. */
3682 if (tp->link_config.speed == SPEED_1000) {
3683 if (tp->link_config.duplex == DUPLEX_FULL)
3684 new_adv = ADVERTISED_1000baseT_Full;
3686 new_adv = ADVERTISED_1000baseT_Half;
3687 } else if (tp->link_config.speed == SPEED_100) {
3688 if (tp->link_config.duplex == DUPLEX_FULL)
3689 new_adv = ADVERTISED_100baseT_Full;
3691 new_adv = ADVERTISED_100baseT_Half;
3693 if (tp->link_config.duplex == DUPLEX_FULL)
3694 new_adv = ADVERTISED_10baseT_Full;
3696 new_adv = ADVERTISED_10baseT_Half;
3699 tg3_phy_autoneg_cfg(tp, new_adv,
3700 tp->link_config.flowctrl);
3703 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3704 tp->link_config.speed != SPEED_INVALID) {
3705 u32 bmcr, orig_bmcr;
3707 tp->link_config.active_speed = tp->link_config.speed;
3708 tp->link_config.active_duplex = tp->link_config.duplex;
3711 switch (tp->link_config.speed) {
3717 bmcr |= BMCR_SPEED100;
3721 bmcr |= BMCR_SPEED1000;
3725 if (tp->link_config.duplex == DUPLEX_FULL)
3726 bmcr |= BMCR_FULLDPLX;
3728 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3729 (bmcr != orig_bmcr)) {
3730 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3731 for (i = 0; i < 1500; i++) {
3735 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3736 tg3_readphy(tp, MII_BMSR, &tmp))
3738 if (!(tmp & BMSR_LSTATUS)) {
3743 tg3_writephy(tp, MII_BMCR, bmcr);
3747 tg3_writephy(tp, MII_BMCR,
3748 BMCR_ANENABLE | BMCR_ANRESTART);
3752 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3756 /* Turn off tap power management. */
3757 /* Set Extended packet length bit */
3758 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3760 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3761 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3762 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3763 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3764 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3771 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3773 u32 advmsk, tgtadv, advertising;
3775 advertising = tp->link_config.advertising;
3776 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3778 advmsk = ADVERTISE_ALL;
3779 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3780 tgtadv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3781 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3784 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3787 if ((*lcladv & advmsk) != tgtadv)
3790 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3793 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3795 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3798 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3799 if (tg3_ctrl != tgtadv)
3806 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3810 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3813 if (tg3_readphy(tp, MII_STAT1000, &val))
3816 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3819 if (tg3_readphy(tp, MII_LPA, rmtadv))
3822 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3823 tp->link_config.rmt_adv = lpeth;
3828 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3830 int current_link_up;
3832 u32 lcl_adv, rmt_adv;
3840 (MAC_STATUS_SYNC_CHANGED |
3841 MAC_STATUS_CFG_CHANGED |
3842 MAC_STATUS_MI_COMPLETION |
3843 MAC_STATUS_LNKSTATE_CHANGED));
3846 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3848 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3852 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3854 /* Some third-party PHYs need to be reset on link going
3857 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3859 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3860 netif_carrier_ok(tp->dev)) {
3861 tg3_readphy(tp, MII_BMSR, &bmsr);
3862 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3863 !(bmsr & BMSR_LSTATUS))
3869 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3870 tg3_readphy(tp, MII_BMSR, &bmsr);
3871 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3872 !tg3_flag(tp, INIT_COMPLETE))
3875 if (!(bmsr & BMSR_LSTATUS)) {
3876 err = tg3_init_5401phy_dsp(tp);
3880 tg3_readphy(tp, MII_BMSR, &bmsr);
3881 for (i = 0; i < 1000; i++) {
3883 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3884 (bmsr & BMSR_LSTATUS)) {
3890 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3891 TG3_PHY_REV_BCM5401_B0 &&
3892 !(bmsr & BMSR_LSTATUS) &&
3893 tp->link_config.active_speed == SPEED_1000) {
3894 err = tg3_phy_reset(tp);
3896 err = tg3_init_5401phy_dsp(tp);
3901 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3902 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3903 /* 5701 {A0,B0} CRC bug workaround */
3904 tg3_writephy(tp, 0x15, 0x0a75);
3905 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3906 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3907 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3910 /* Clear pending interrupts... */
3911 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3912 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3914 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3915 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3916 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3917 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3921 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3922 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3923 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3925 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3928 current_link_up = 0;
3929 current_speed = SPEED_INVALID;
3930 current_duplex = DUPLEX_INVALID;
3931 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3932 tp->link_config.rmt_adv = 0;
3934 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3935 err = tg3_phy_auxctl_read(tp,
3936 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3938 if (!err && !(val & (1 << 10))) {
3939 tg3_phy_auxctl_write(tp,
3940 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3947 for (i = 0; i < 100; i++) {
3948 tg3_readphy(tp, MII_BMSR, &bmsr);
3949 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3950 (bmsr & BMSR_LSTATUS))
3955 if (bmsr & BMSR_LSTATUS) {
3958 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3959 for (i = 0; i < 2000; i++) {
3961 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3966 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3971 for (i = 0; i < 200; i++) {
3972 tg3_readphy(tp, MII_BMCR, &bmcr);
3973 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3975 if (bmcr && bmcr != 0x7fff)
3983 tp->link_config.active_speed = current_speed;
3984 tp->link_config.active_duplex = current_duplex;
3986 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3987 if ((bmcr & BMCR_ANENABLE) &&
3988 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3989 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3990 current_link_up = 1;
3992 if (!(bmcr & BMCR_ANENABLE) &&
3993 tp->link_config.speed == current_speed &&
3994 tp->link_config.duplex == current_duplex &&
3995 tp->link_config.flowctrl ==
3996 tp->link_config.active_flowctrl) {
3997 current_link_up = 1;
4001 if (current_link_up == 1 &&
4002 tp->link_config.active_duplex == DUPLEX_FULL) {
4005 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4006 reg = MII_TG3_FET_GEN_STAT;
4007 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4009 reg = MII_TG3_EXT_STAT;
4010 bit = MII_TG3_EXT_STAT_MDIX;
4013 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4014 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4016 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4021 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4022 tg3_phy_copper_begin(tp);
4024 tg3_readphy(tp, MII_BMSR, &bmsr);
4025 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4026 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4027 current_link_up = 1;
4030 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4031 if (current_link_up == 1) {
4032 if (tp->link_config.active_speed == SPEED_100 ||
4033 tp->link_config.active_speed == SPEED_10)
4034 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4036 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4037 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4038 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4040 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4042 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4043 if (tp->link_config.active_duplex == DUPLEX_HALF)
4044 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4047 if (current_link_up == 1 &&
4048 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4049 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4051 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4054 /* ??? Without this setting Netgear GA302T PHY does not
4055 * ??? send/receive packets... */
4057 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4058 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4059 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4060 tw32_f(MAC_MI_MODE, tp->mi_mode);
4064 tw32_f(MAC_MODE, tp->mac_mode);
4067 tg3_phy_eee_adjust(tp, current_link_up);
4069 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4070 /* Polled via timer. */
4071 tw32_f(MAC_EVENT, 0);
4073 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4078 current_link_up == 1 &&
4079 tp->link_config.active_speed == SPEED_1000 &&
4080 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4083 (MAC_STATUS_SYNC_CHANGED |
4084 MAC_STATUS_CFG_CHANGED));
4087 NIC_SRAM_FIRMWARE_MBOX,
4088 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4091 /* Prevent send BD corruption. */
4092 if (tg3_flag(tp, CLKREQ_BUG)) {
4093 u16 oldlnkctl, newlnkctl;
4095 pci_read_config_word(tp->pdev,
4096 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4098 if (tp->link_config.active_speed == SPEED_100 ||
4099 tp->link_config.active_speed == SPEED_10)
4100 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4102 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4103 if (newlnkctl != oldlnkctl)
4104 pci_write_config_word(tp->pdev,
4105 pci_pcie_cap(tp->pdev) +
4106 PCI_EXP_LNKCTL, newlnkctl);
4109 if (current_link_up != netif_carrier_ok(tp->dev)) {
4110 if (current_link_up)
4111 netif_carrier_on(tp->dev);
4113 netif_carrier_off(tp->dev);
4114 tg3_link_report(tp);
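/* State for the software 1000BASE-X autonegotiation engine used when
 * the MAC cannot (or is not configured to) run autoneg in hardware.
 */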
4120 struct tg3_fiber_aneginfo {
4122 #define ANEG_STATE_UNKNOWN 0
4123 #define ANEG_STATE_AN_ENABLE 1
4124 #define ANEG_STATE_RESTART_INIT 2
4125 #define ANEG_STATE_RESTART 3
4126 #define ANEG_STATE_DISABLE_LINK_OK 4
4127 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4128 #define ANEG_STATE_ABILITY_DETECT 6
4129 #define ANEG_STATE_ACK_DETECT_INIT 7
4130 #define ANEG_STATE_ACK_DETECT 8
4131 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4132 #define ANEG_STATE_COMPLETE_ACK 10
4133 #define ANEG_STATE_IDLE_DETECT_INIT 11
4134 #define ANEG_STATE_IDLE_DETECT 12
4135 #define ANEG_STATE_LINK_OK 13
4136 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4137 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4140 #define MR_AN_ENABLE 0x00000001
4141 #define MR_RESTART_AN 0x00000002
4142 #define MR_AN_COMPLETE 0x00000004
4143 #define MR_PAGE_RX 0x00000008
4144 #define MR_NP_LOADED 0x00000010
4145 #define MR_TOGGLE_TX 0x00000020
4146 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4147 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4148 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4149 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4150 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4151 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4152 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4153 #define MR_TOGGLE_RX 0x00002000
4154 #define MR_NP_RX 0x00004000
4156 #define MR_LINK_OK 0x80000000
4158 unsigned long link_time, cur_time;
4160 u32 ability_match_cfg;
4161 int ability_match_count;
4163 char ability_match, idle_match, ack_match;
4165 u32 txconfig, rxconfig;
4166 #define ANEG_CFG_NP 0x00000080
4167 #define ANEG_CFG_ACK 0x00000040
4168 #define ANEG_CFG_RF2 0x00000020
4169 #define ANEG_CFG_RF1 0x00000010
4170 #define ANEG_CFG_PS2 0x00000001
4171 #define ANEG_CFG_PS1 0x00008000
4172 #define ANEG_CFG_HD 0x00004000
4173 #define ANEG_CFG_FD 0x00002000
4174 #define ANEG_CFG_INVAL 0x00001f06
4179 #define ANEG_TIMER_ENAB 2
4180 #define ANEG_FAILED -1
4182 #define ANEG_STATE_SETTLE_TIME 10000
4184 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4185 struct tg3_fiber_aneginfo *ap)
4188 unsigned long delta;
4192 if (ap->state == ANEG_STATE_UNKNOWN) {
4196 ap->ability_match_cfg = 0;
4197 ap->ability_match_count = 0;
4198 ap->ability_match = 0;
4204 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4205 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4207 if (rx_cfg_reg != ap->ability_match_cfg) {
4208 ap->ability_match_cfg = rx_cfg_reg;
4209 ap->ability_match = 0;
4210 ap->ability_match_count = 0;
4212 if (++ap->ability_match_count > 1) {
4213 ap->ability_match = 1;
4214 ap->ability_match_cfg = rx_cfg_reg;
4217 if (rx_cfg_reg & ANEG_CFG_ACK)
4225 ap->ability_match_cfg = 0;
4226 ap->ability_match_count = 0;
4227 ap->ability_match = 0;
4233 ap->rxconfig = rx_cfg_reg;
4236 switch (ap->state) {
4237 case ANEG_STATE_UNKNOWN:
4238 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4239 ap->state = ANEG_STATE_AN_ENABLE;
4242 case ANEG_STATE_AN_ENABLE:
4243 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4244 if (ap->flags & MR_AN_ENABLE) {
4247 ap->ability_match_cfg = 0;
4248 ap->ability_match_count = 0;
4249 ap->ability_match = 0;
4253 ap->state = ANEG_STATE_RESTART_INIT;
4255 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4259 case ANEG_STATE_RESTART_INIT:
4260 ap->link_time = ap->cur_time;
4261 ap->flags &= ~(MR_NP_LOADED);
4263 tw32(MAC_TX_AUTO_NEG, 0);
4264 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4265 tw32_f(MAC_MODE, tp->mac_mode);
4268 ret = ANEG_TIMER_ENAB;
4269 ap->state = ANEG_STATE_RESTART;
4272 case ANEG_STATE_RESTART:
4273 delta = ap->cur_time - ap->link_time;
4274 if (delta > ANEG_STATE_SETTLE_TIME)
4275 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4277 ret = ANEG_TIMER_ENAB;
4280 case ANEG_STATE_DISABLE_LINK_OK:
4284 case ANEG_STATE_ABILITY_DETECT_INIT:
4285 ap->flags &= ~(MR_TOGGLE_TX);
4286 ap->txconfig = ANEG_CFG_FD;
4287 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4288 if (flowctrl & ADVERTISE_1000XPAUSE)
4289 ap->txconfig |= ANEG_CFG_PS1;
4290 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4291 ap->txconfig |= ANEG_CFG_PS2;
4292 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4293 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4294 tw32_f(MAC_MODE, tp->mac_mode);
4297 ap->state = ANEG_STATE_ABILITY_DETECT;
4300 case ANEG_STATE_ABILITY_DETECT:
4301 if (ap->ability_match != 0 && ap->rxconfig != 0)
4302 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4305 case ANEG_STATE_ACK_DETECT_INIT:
4306 ap->txconfig |= ANEG_CFG_ACK;
4307 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4308 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4309 tw32_f(MAC_MODE, tp->mac_mode);
4312 ap->state = ANEG_STATE_ACK_DETECT;
4315 case ANEG_STATE_ACK_DETECT:
4316 if (ap->ack_match != 0) {
4317 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4318 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4319 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4321 ap->state = ANEG_STATE_AN_ENABLE;
4323 } else if (ap->ability_match != 0 &&
4324 ap->rxconfig == 0) {
4325 ap->state = ANEG_STATE_AN_ENABLE;
4329 case ANEG_STATE_COMPLETE_ACK_INIT:
4330 if (ap->rxconfig & ANEG_CFG_INVAL) {
4334 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4335 MR_LP_ADV_HALF_DUPLEX |
4336 MR_LP_ADV_SYM_PAUSE |
4337 MR_LP_ADV_ASYM_PAUSE |
4338 MR_LP_ADV_REMOTE_FAULT1 |
4339 MR_LP_ADV_REMOTE_FAULT2 |
4340 MR_LP_ADV_NEXT_PAGE |
4343 if (ap->rxconfig & ANEG_CFG_FD)
4344 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4345 if (ap->rxconfig & ANEG_CFG_HD)
4346 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4347 if (ap->rxconfig & ANEG_CFG_PS1)
4348 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4349 if (ap->rxconfig & ANEG_CFG_PS2)
4350 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4351 if (ap->rxconfig & ANEG_CFG_RF1)
4352 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4353 if (ap->rxconfig & ANEG_CFG_RF2)
4354 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4355 if (ap->rxconfig & ANEG_CFG_NP)
4356 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4358 ap->link_time = ap->cur_time;
4360 ap->flags ^= (MR_TOGGLE_TX);
4361 if (ap->rxconfig & 0x0008)
4362 ap->flags |= MR_TOGGLE_RX;
4363 if (ap->rxconfig & ANEG_CFG_NP)
4364 ap->flags |= MR_NP_RX;
4365 ap->flags |= MR_PAGE_RX;
4367 ap->state = ANEG_STATE_COMPLETE_ACK;
4368 ret = ANEG_TIMER_ENAB;
4371 case ANEG_STATE_COMPLETE_ACK:
4372 if (ap->ability_match != 0 &&
4373 ap->rxconfig == 0) {
4374 ap->state = ANEG_STATE_AN_ENABLE;
4377 delta = ap->cur_time - ap->link_time;
4378 if (delta > ANEG_STATE_SETTLE_TIME) {
4379 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4380 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4382 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4383 !(ap->flags & MR_NP_RX)) {
4384 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4392 case ANEG_STATE_IDLE_DETECT_INIT:
4393 ap->link_time = ap->cur_time;
4394 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4395 tw32_f(MAC_MODE, tp->mac_mode);
4398 ap->state = ANEG_STATE_IDLE_DETECT;
4399 ret = ANEG_TIMER_ENAB;
4402 case ANEG_STATE_IDLE_DETECT:
4403 if (ap->ability_match != 0 &&
4404 ap->rxconfig == 0) {
4405 ap->state = ANEG_STATE_AN_ENABLE;
4408 delta = ap->cur_time - ap->link_time;
4409 if (delta > ANEG_STATE_SETTLE_TIME) {
4410 /* XXX another gem from the Broadcom driver :( */
4411 ap->state = ANEG_STATE_LINK_OK;
4415 case ANEG_STATE_LINK_OK:
4416 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4420 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4421 /* ??? unimplemented */
4424 case ANEG_STATE_NEXT_PAGE_WAIT:
4425 /* ??? unimplemented */
4436 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4439 struct tg3_fiber_aneginfo aninfo;
4440 int status = ANEG_FAILED;
4444 tw32_f(MAC_TX_AUTO_NEG, 0);
4446 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4447 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4450 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4453 memset(&aninfo, 0, sizeof(aninfo));
4454 aninfo.flags |= MR_AN_ENABLE;
4455 aninfo.state = ANEG_STATE_UNKNOWN;
4456 aninfo.cur_time = 0;
4458 while (++tick < 195000) {
4459 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4460 if (status == ANEG_DONE || status == ANEG_FAILED)
4466 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4467 tw32_f(MAC_MODE, tp->mac_mode);
4470 *txflags = aninfo.txconfig;
4471 *rxflags = aninfo.flags;
4473 if (status == ANEG_DONE &&
4474 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4475 MR_LP_ADV_FULL_DUPLEX)))
4481 static void tg3_init_bcm8002(struct tg3 *tp)
4483 u32 mac_status = tr32(MAC_STATUS);
4486 /* Reset when initting first time or we have a link. */
4487 if (tg3_flag(tp, INIT_COMPLETE) &&
4488 !(mac_status & MAC_STATUS_PCS_SYNCED))
4491 /* Set PLL lock range. */
4492 tg3_writephy(tp, 0x16, 0x8007);
4495 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4497 /* Wait for reset to complete. */
4498 /* XXX schedule_timeout() ... */
4499 for (i = 0; i < 500; i++)
4502 /* Config mode; select PMA/Ch 1 regs. */
4503 tg3_writephy(tp, 0x10, 0x8411);
4505 /* Enable auto-lock and comdet, select txclk for tx. */
4506 tg3_writephy(tp, 0x11, 0x0a10);
4508 tg3_writephy(tp, 0x18, 0x00a0);
4509 tg3_writephy(tp, 0x16, 0x41ff);
4511 /* Assert and deassert POR. */
4512 tg3_writephy(tp, 0x13, 0x0400);
4514 tg3_writephy(tp, 0x13, 0x0000);
4516 tg3_writephy(tp, 0x11, 0x0a50);
4518 tg3_writephy(tp, 0x11, 0x0a10);
4520 /* Wait for signal to stabilize */
4521 /* XXX schedule_timeout() ... */
4522 for (i = 0; i < 15000; i++)
4525 /* Deselect the channel register so we can read the PHYID
4528 tg3_writephy(tp, 0x10, 0x8011);
4531 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4534 u32 sg_dig_ctrl, sg_dig_status;
4535 u32 serdes_cfg, expected_sg_dig_ctrl;
4536 int workaround, port_a;
4537 int current_link_up;
4540 expected_sg_dig_ctrl = 0;
4543 current_link_up = 0;
4545 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4546 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4548 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4551 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4552 /* preserve bits 20-23 for voltage regulator */
4553 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4556 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4558 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4559 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4561 u32 val = serdes_cfg;
4567 tw32_f(MAC_SERDES_CFG, val);
4570 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4572 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4573 tg3_setup_flow_control(tp, 0, 0);
4574 current_link_up = 1;
4579 /* Want auto-negotiation. */
4580 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4582 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4583 if (flowctrl & ADVERTISE_1000XPAUSE)
4584 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4585 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4586 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4588 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4589 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4590 tp->serdes_counter &&
4591 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4592 MAC_STATUS_RCVD_CFG)) ==
4593 MAC_STATUS_PCS_SYNCED)) {
4594 tp->serdes_counter--;
4595 current_link_up = 1;
4600 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4601 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4603 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4605 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4606 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4607 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4608 MAC_STATUS_SIGNAL_DET)) {
4609 sg_dig_status = tr32(SG_DIG_STATUS);
4610 mac_status = tr32(MAC_STATUS);
4612 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4613 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4614 u32 local_adv = 0, remote_adv = 0;
4616 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4617 local_adv |= ADVERTISE_1000XPAUSE;
4618 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4619 local_adv |= ADVERTISE_1000XPSE_ASYM;
4621 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4622 remote_adv |= LPA_1000XPAUSE;
4623 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4624 remote_adv |= LPA_1000XPAUSE_ASYM;
4626 tp->link_config.rmt_adv =
4627 mii_adv_to_ethtool_adv_x(remote_adv);
4629 tg3_setup_flow_control(tp, local_adv, remote_adv);
4630 current_link_up = 1;
4631 tp->serdes_counter = 0;
4632 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4633 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4634 if (tp->serdes_counter)
4635 tp->serdes_counter--;
4638 u32 val = serdes_cfg;
4645 tw32_f(MAC_SERDES_CFG, val);
4648 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4651 /* Link parallel detection - link is up
4652 * only if we have PCS_SYNC and not
4653 * receiving config code words */
4654 mac_status = tr32(MAC_STATUS);
4655 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4656 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4657 tg3_setup_flow_control(tp, 0, 0);
4658 current_link_up = 1;
4660 TG3_PHYFLG_PARALLEL_DETECT;
4661 tp->serdes_counter =
4662 SERDES_PARALLEL_DET_TIMEOUT;
4664 goto restart_autoneg;
4668 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4669 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4673 return current_link_up;
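/* Fiber link bring-up without the SERDES hardware autoneg block:
 * either run the software autoneg state machine above or simply force
 * a 1000 Mbps full-duplex link.
 */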
4676 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4678 int current_link_up = 0;
4680 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4683 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4684 u32 txflags, rxflags;
4687 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4688 u32 local_adv = 0, remote_adv = 0;
4690 if (txflags & ANEG_CFG_PS1)
4691 local_adv |= ADVERTISE_1000XPAUSE;
4692 if (txflags & ANEG_CFG_PS2)
4693 local_adv |= ADVERTISE_1000XPSE_ASYM;
4695 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4696 remote_adv |= LPA_1000XPAUSE;
4697 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4698 remote_adv |= LPA_1000XPAUSE_ASYM;
4700 tp->link_config.rmt_adv =
4701 mii_adv_to_ethtool_adv_x(remote_adv);
4703 tg3_setup_flow_control(tp, local_adv, remote_adv);
4705 current_link_up = 1;
4707 for (i = 0; i < 30; i++) {
4710 (MAC_STATUS_SYNC_CHANGED |
4711 MAC_STATUS_CFG_CHANGED));
4713 if ((tr32(MAC_STATUS) &
4714 (MAC_STATUS_SYNC_CHANGED |
4715 MAC_STATUS_CFG_CHANGED)) == 0)
4719 mac_status = tr32(MAC_STATUS);
4720 if (current_link_up == 0 &&
4721 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4722 !(mac_status & MAC_STATUS_RCVD_CFG))
4723 current_link_up = 1;
4725 tg3_setup_flow_control(tp, 0, 0);
4727 /* Forcing 1000FD link up. */
4728 current_link_up = 1;
4730 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4733 tw32_f(MAC_MODE, tp->mac_mode);
4738 return current_link_up;
4741 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4744 u16 orig_active_speed;
4745 u8 orig_active_duplex;
4747 int current_link_up;
4750 orig_pause_cfg = tp->link_config.active_flowctrl;
4751 orig_active_speed = tp->link_config.active_speed;
4752 orig_active_duplex = tp->link_config.active_duplex;
4754 if (!tg3_flag(tp, HW_AUTONEG) &&
4755 netif_carrier_ok(tp->dev) &&
4756 tg3_flag(tp, INIT_COMPLETE)) {
4757 mac_status = tr32(MAC_STATUS);
4758 mac_status &= (MAC_STATUS_PCS_SYNCED |
4759 MAC_STATUS_SIGNAL_DET |
4760 MAC_STATUS_CFG_CHANGED |
4761 MAC_STATUS_RCVD_CFG);
4762 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4763 MAC_STATUS_SIGNAL_DET)) {
4764 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4765 MAC_STATUS_CFG_CHANGED));
4770 tw32_f(MAC_TX_AUTO_NEG, 0);
4772 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4773 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4774 tw32_f(MAC_MODE, tp->mac_mode);
4777 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4778 tg3_init_bcm8002(tp);
4780 /* Enable link change event even when serdes polling. */
4781 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4784 current_link_up = 0;
4785 tp->link_config.rmt_adv = 0;
4786 mac_status = tr32(MAC_STATUS);
4788 if (tg3_flag(tp, HW_AUTONEG))
4789 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4791 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4793 tp->napi[0].hw_status->status =
4794 (SD_STATUS_UPDATED |
4795 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4797 for (i = 0; i < 100; i++) {
4798 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4799 MAC_STATUS_CFG_CHANGED));
4801 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4802 MAC_STATUS_CFG_CHANGED |
4803 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4807 mac_status = tr32(MAC_STATUS);
4808 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4809 current_link_up = 0;
4810 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4811 tp->serdes_counter == 0) {
4812 tw32_f(MAC_MODE, (tp->mac_mode |
4813 MAC_MODE_SEND_CONFIGS));
4815 tw32_f(MAC_MODE, tp->mac_mode);
4819 if (current_link_up == 1) {
4820 tp->link_config.active_speed = SPEED_1000;
4821 tp->link_config.active_duplex = DUPLEX_FULL;
4822 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4823 LED_CTRL_LNKLED_OVERRIDE |
4824 LED_CTRL_1000MBPS_ON));
4826 tp->link_config.active_speed = SPEED_INVALID;
4827 tp->link_config.active_duplex = DUPLEX_INVALID;
4828 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4829 LED_CTRL_LNKLED_OVERRIDE |
4830 LED_CTRL_TRAFFIC_OVERRIDE));
4833 if (current_link_up != netif_carrier_ok(tp->dev)) {
4834 if (current_link_up)
4835 netif_carrier_on(tp->dev);
4837 netif_carrier_off(tp->dev);
4838 tg3_link_report(tp);
4840 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4841 if (orig_pause_cfg != now_pause_cfg ||
4842 orig_active_speed != tp->link_config.active_speed ||
4843 orig_active_duplex != tp->link_config.active_duplex)
4844 tg3_link_report(tp);
4850 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4852 int current_link_up, err = 0;
4856 u32 local_adv, remote_adv;
4858 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4859 tw32_f(MAC_MODE, tp->mac_mode);
4865 (MAC_STATUS_SYNC_CHANGED |
4866 MAC_STATUS_CFG_CHANGED |
4867 MAC_STATUS_MI_COMPLETION |
4868 MAC_STATUS_LNKSTATE_CHANGED));
4874 current_link_up = 0;
4875 current_speed = SPEED_INVALID;
4876 current_duplex = DUPLEX_INVALID;
4877 tp->link_config.rmt_adv = 0;
4879 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4880 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4882 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4883 bmsr |= BMSR_LSTATUS;
4885 bmsr &= ~BMSR_LSTATUS;
4888 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4890 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4891 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4892 /* do nothing, just check for link up at the end */
4893 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4896 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4897 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4898 ADVERTISE_1000XPAUSE |
4899 ADVERTISE_1000XPSE_ASYM |
4902 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4903 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4905 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4906 tg3_writephy(tp, MII_ADVERTISE, newadv);
4907 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4908 tg3_writephy(tp, MII_BMCR, bmcr);
4910 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4911 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4912 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4919 bmcr &= ~BMCR_SPEED1000;
4920 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4922 if (tp->link_config.duplex == DUPLEX_FULL)
4923 new_bmcr |= BMCR_FULLDPLX;
4925 if (new_bmcr != bmcr) {
4926 /* BMCR_SPEED1000 is a reserved bit that needs
4927 * to be set on write.
4929 new_bmcr |= BMCR_SPEED1000;
4931 /* Force a linkdown */
4932 if (netif_carrier_ok(tp->dev)) {
4935 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4936 adv &= ~(ADVERTISE_1000XFULL |
4937 ADVERTISE_1000XHALF |
4939 tg3_writephy(tp, MII_ADVERTISE, adv);
4940 tg3_writephy(tp, MII_BMCR, bmcr |
4944 netif_carrier_off(tp->dev);
4946 tg3_writephy(tp, MII_BMCR, new_bmcr);
4948 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4949 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4950 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4952 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4953 bmsr |= BMSR_LSTATUS;
4955 bmsr &= ~BMSR_LSTATUS;
4957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4961 if (bmsr & BMSR_LSTATUS) {
4962 current_speed = SPEED_1000;
4963 current_link_up = 1;
4964 if (bmcr & BMCR_FULLDPLX)
4965 current_duplex = DUPLEX_FULL;
4967 current_duplex = DUPLEX_HALF;
4972 if (bmcr & BMCR_ANENABLE) {
4975 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4976 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4977 common = local_adv & remote_adv;
4978 if (common & (ADVERTISE_1000XHALF |
4979 ADVERTISE_1000XFULL)) {
4980 if (common & ADVERTISE_1000XFULL)
4981 current_duplex = DUPLEX_FULL;
4983 current_duplex = DUPLEX_HALF;
4985 tp->link_config.rmt_adv =
4986 mii_adv_to_ethtool_adv_x(remote_adv);
4987 } else if (!tg3_flag(tp, 5780_CLASS)) {
4988 /* Link is up via parallel detect */
4990 current_link_up = 0;
4995 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4996 tg3_setup_flow_control(tp, local_adv, remote_adv);
4998 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4999 if (tp->link_config.active_duplex == DUPLEX_HALF)
5000 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5002 tw32_f(MAC_MODE, tp->mac_mode);
5005 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5007 tp->link_config.active_speed = current_speed;
5008 tp->link_config.active_duplex = current_duplex;
5010 if (current_link_up != netif_carrier_ok(tp->dev)) {
5011 if (current_link_up)
5012 netif_carrier_on(tp->dev);
5014 netif_carrier_off(tp->dev);
5015 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5017 tg3_link_report(tp);
5022 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5024 if (tp->serdes_counter) {
5025 /* Give autoneg time to complete. */
5026 tp->serdes_counter--;
5030 if (!netif_carrier_ok(tp->dev) &&
5031 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5034 tg3_readphy(tp, MII_BMCR, &bmcr);
5035 if (bmcr & BMCR_ANENABLE) {
5038 /* Select shadow register 0x1f */
5039 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5040 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5042 /* Select expansion interrupt status register */
5043 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5044 MII_TG3_DSP_EXP1_INT_STAT);
5045 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5046 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5048 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5049 /* We have signal detect and not receiving
5050 * config code words, link is up by parallel
5054 bmcr &= ~BMCR_ANENABLE;
5055 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5056 tg3_writephy(tp, MII_BMCR, bmcr);
5057 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5060 } else if (netif_carrier_ok(tp->dev) &&
5061 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5062 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5065 /* Select expansion interrupt status register */
5066 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5067 MII_TG3_DSP_EXP1_INT_STAT);
5068 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5072 /* Config code words received, turn on autoneg. */
5073 tg3_readphy(tp, MII_BMCR, &bmcr);
5074 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5076 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5082 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5087 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5088 err = tg3_setup_fiber_phy(tp, force_reset);
5089 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5090 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5092 err = tg3_setup_copper_phy(tp, force_reset);
5094 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5097 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5098 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5100 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5105 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5106 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5107 tw32(GRC_MISC_CFG, val);
5110 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5111 (6 << TX_LENGTHS_IPG_SHIFT);
5112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5113 val |= tr32(MAC_TX_LENGTHS) &
5114 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5115 TX_LENGTHS_CNT_DWN_VAL_MSK);
5117 if (tp->link_config.active_speed == SPEED_1000 &&
5118 tp->link_config.active_duplex == DUPLEX_HALF)
5119 tw32(MAC_TX_LENGTHS, val |
5120 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5122 tw32(MAC_TX_LENGTHS, val |
5123 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5125 if (!tg3_flag(tp, 5705_PLUS)) {
5126 if (netif_carrier_ok(tp->dev)) {
5127 tw32(HOSTCC_STAT_COAL_TICKS,
5128 tp->coal.stats_block_coalesce_usecs);
5130 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5134 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5135 val = tr32(PCIE_PWR_MGMT_THRESH);
5136 if (!netif_carrier_ok(tp->dev))
5137 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5140 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5141 tw32(PCIE_PWR_MGMT_THRESH, val);
5147 static inline int tg3_irq_sync(struct tg3 *tp)
5149 return tp->irq_sync;
5152 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5156 dst = (u32 *)((u8 *)dst + off);
5157 for (i = 0; i < len; i += sizeof(u32))
5158 *dst++ = tr32(off + i);
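/* Illustrative usage of tg3_rd32_loop() above (a sketch, assuming the
 * register names are byte offsets, as they are used throughout this file):
 * because dst is first advanced by 'off', each register lands at
 * regs[(off + i) / 4], i.e. the dump preserves the register's offset.
 *
 *	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
 *
 * copies the single 32-bit register at offset SNDDATAC_MODE into
 * regs[SNDDATAC_MODE / sizeof(u32)].
 */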
5161 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5163 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5164 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5165 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5166 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5167 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5168 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5169 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5170 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5171 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5172 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5173 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5174 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5175 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5176 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5177 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5178 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5179 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5180 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5181 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5183 if (tg3_flag(tp, SUPPORT_MSIX))
5184 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5186 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5187 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5188 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5189 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5190 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5191 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5192 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5193 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5195 if (!tg3_flag(tp, 5705_PLUS)) {
5196 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5197 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5198 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5201 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5202 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5203 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5204 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5205 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5207 if (tg3_flag(tp, NVRAM))
5208 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5211 static void tg3_dump_state(struct tg3 *tp)
5216 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5218 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5222 if (tg3_flag(tp, PCI_EXPRESS)) {
5223 /* Read up to but not including private PCI registers */
5224 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5225 regs[i / sizeof(u32)] = tr32(i);
5227 tg3_dump_legacy_regs(tp, regs);
5229 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5230 if (!regs[i + 0] && !regs[i + 1] &&
5231 !regs[i + 2] && !regs[i + 3])
5234 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5236 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5241 for (i = 0; i < tp->irq_cnt; i++) {
5242 struct tg3_napi *tnapi = &tp->napi[i];
5244 /* SW status block */
5246 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5248 tnapi->hw_status->status,
5249 tnapi->hw_status->status_tag,
5250 tnapi->hw_status->rx_jumbo_consumer,
5251 tnapi->hw_status->rx_consumer,
5252 tnapi->hw_status->rx_mini_consumer,
5253 tnapi->hw_status->idx[0].rx_producer,
5254 tnapi->hw_status->idx[0].tx_consumer);
5257 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5259 tnapi->last_tag, tnapi->last_irq_tag,
5260 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5262 tnapi->prodring.rx_std_prod_idx,
5263 tnapi->prodring.rx_std_cons_idx,
5264 tnapi->prodring.rx_jmb_prod_idx,
5265 tnapi->prodring.rx_jmb_cons_idx);
5269 /* This is called whenever we suspect that the system chipset is re-
5270 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5271 * is bogus tx completions. We try to recover by setting the
5272 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5275 static void tg3_tx_recover(struct tg3 *tp)
5277 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5278 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5280 netdev_warn(tp->dev,
5281 "The system may be re-ordering memory-mapped I/O "
5282 "cycles to the network device, attempting to recover. "
5283 "Please report the problem to the driver maintainer "
5284 "and include system chipset information.\n");
5286 spin_lock(&tp->lock);
5287 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5288 spin_unlock(&tp->lock);
5291 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5293 /* Tell compiler to fetch tx indices from memory. */
5295 return tnapi->tx_pending -
5296 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
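/* Worked example of the availability math above, assuming a 512-entry
 * ring (TG3_TX_RING_SIZE == 512) and tx_pending == 511; the actual values
 * may differ per configuration:
 *
 *	tx_prod = 10, tx_cons = 500
 *	in flight = (10 - 500) & 511 = 22
 *	tg3_tx_avail() = 511 - 22 = 489
 *
 * The masked subtraction stays correct when the producer index has
 * wrapped past the consumer index.
 */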
5299 /* Tigon3 never reports partial packet sends. So we do not
5300 * need special logic to handle SKBs that have not had all
5301 * of their frags sent yet, like SunGEM does.
5303 static void tg3_tx(struct tg3_napi *tnapi)
5305 struct tg3 *tp = tnapi->tp;
5306 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5307 u32 sw_idx = tnapi->tx_cons;
5308 struct netdev_queue *txq;
5309 int index = tnapi - tp->napi;
5310 unsigned int pkts_compl = 0, bytes_compl = 0;
5312 if (tg3_flag(tp, ENABLE_TSS))
5315 txq = netdev_get_tx_queue(tp->dev, index);
5317 while (sw_idx != hw_idx) {
5318 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5319 struct sk_buff *skb = ri->skb;
5322 if (unlikely(skb == NULL)) {
5327 pci_unmap_single(tp->pdev,
5328 dma_unmap_addr(ri, mapping),
5334 while (ri->fragmented) {
5335 ri->fragmented = false;
5336 sw_idx = NEXT_TX(sw_idx);
5337 ri = &tnapi->tx_buffers[sw_idx];
5340 sw_idx = NEXT_TX(sw_idx);
5342 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5343 ri = &tnapi->tx_buffers[sw_idx];
5344 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5347 pci_unmap_page(tp->pdev,
5348 dma_unmap_addr(ri, mapping),
5349 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5352 while (ri->fragmented) {
5353 ri->fragmented = false;
5354 sw_idx = NEXT_TX(sw_idx);
5355 ri = &tnapi->tx_buffers[sw_idx];
5358 sw_idx = NEXT_TX(sw_idx);
5362 bytes_compl += skb->len;
5366 if (unlikely(tx_bug)) {
5372 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5374 tnapi->tx_cons = sw_idx;
5376 /* Need to make the tx_cons update visible to tg3_start_xmit()
5377 * before checking for netif_queue_stopped(). Without the
5378 * memory barrier, there is a small possibility that tg3_start_xmit()
5379 * will miss it and cause the queue to be stopped forever.
5383 if (unlikely(netif_tx_queue_stopped(txq) &&
5384 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5385 __netif_tx_lock(txq, smp_processor_id());
5386 if (netif_tx_queue_stopped(txq) &&
5387 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5388 netif_tx_wake_queue(txq);
5389 __netif_tx_unlock(txq);
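/* Sketch of the stop/wake handshake between tg3_start_xmit() and tg3_tx(),
 * both of which live in this file (the memory barriers are the ones
 * referred to in the surrounding comments):
 *
 *	tg3_start_xmit()                  tg3_tx()
 *	--------------------------        --------------------------------
 *	publish new tx_prod               publish new tx_cons
 *	if ring nearly full:              memory barrier
 *	    netif_tx_stop_queue()         if queue stopped and space freed:
 *	    memory barrier                    take txq lock, re-check, wake
 *	    re-check avail, maybe wake
 *
 * The re-check under the txq lock keeps a completion that races with the
 * stop from leaving the queue stopped forever.
 */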
5393 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5398 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5399 map_sz, PCI_DMA_FROMDEVICE);
5404 /* Returns size of skb allocated or < 0 on error.
5406 * We only need to fill in the address because the other members
5407 * of the RX descriptor are invariant, see tg3_init_rings.
5409 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5410 * posting buffers we only dirty the first cache line of the RX
5411 * descriptor (containing the address). Whereas for the RX status
5412 * buffers the cpu only reads the last cacheline of the RX descriptor
5413 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5415 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5416 u32 opaque_key, u32 dest_idx_unmasked)
5418 struct tg3_rx_buffer_desc *desc;
5419 struct ring_info *map;
5422 int skb_size, data_size, dest_idx;
5424 switch (opaque_key) {
5425 case RXD_OPAQUE_RING_STD:
5426 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5427 desc = &tpr->rx_std[dest_idx];
5428 map = &tpr->rx_std_buffers[dest_idx];
5429 data_size = tp->rx_pkt_map_sz;
5432 case RXD_OPAQUE_RING_JUMBO:
5433 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5434 desc = &tpr->rx_jmb[dest_idx].std;
5435 map = &tpr->rx_jmb_buffers[dest_idx];
5436 data_size = TG3_RX_JMB_MAP_SZ;
5443 /* Do not overwrite any of the map or rp information
5444 * until we are sure we can commit to a new buffer.
5446 * Callers depend upon this behavior and assume that
5447 * we leave everything unchanged if we fail.
5449 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5450 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5451 data = kmalloc(skb_size, GFP_ATOMIC);
5455 mapping = pci_map_single(tp->pdev,
5456 data + TG3_RX_OFFSET(tp),
5458 PCI_DMA_FROMDEVICE);
5459 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5465 dma_unmap_addr_set(map, mapping, mapping);
5467 desc->addr_hi = ((u64)mapping >> 32);
5468 desc->addr_lo = ((u64)mapping & 0xffffffff);
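/* Buffer layout produced by tg3_alloc_rx_data() above (sketch):
 *
 *	+-------------------+---------------------------+------------------+
 *	| TG3_RX_OFFSET(tp) | data_size (packet bytes)  | skb_shared_info  |
 *	+-------------------+---------------------------+------------------+
 *	^ data              ^ DMA-mapped region starts here
 *
 * Only data + TG3_RX_OFFSET(tp) is handed to the device; the aligned tail
 * is reserved so tg3_rx() can later wrap the same buffer with build_skb()
 * and skb_reserve(TG3_RX_OFFSET(tp)) without copying.
 */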
5473 /* We only need to move over in the address because the other
5474 * members of the RX descriptor are invariant. See notes above
5475 * tg3_alloc_rx_data for full details.
5477 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5478 struct tg3_rx_prodring_set *dpr,
5479 u32 opaque_key, int src_idx,
5480 u32 dest_idx_unmasked)
5482 struct tg3 *tp = tnapi->tp;
5483 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5484 struct ring_info *src_map, *dest_map;
5485 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5488 switch (opaque_key) {
5489 case RXD_OPAQUE_RING_STD:
5490 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5491 dest_desc = &dpr->rx_std[dest_idx];
5492 dest_map = &dpr->rx_std_buffers[dest_idx];
5493 src_desc = &spr->rx_std[src_idx];
5494 src_map = &spr->rx_std_buffers[src_idx];
5497 case RXD_OPAQUE_RING_JUMBO:
5498 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5499 dest_desc = &dpr->rx_jmb[dest_idx].std;
5500 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5501 src_desc = &spr->rx_jmb[src_idx].std;
5502 src_map = &spr->rx_jmb_buffers[src_idx];
5509 dest_map->data = src_map->data;
5510 dma_unmap_addr_set(dest_map, mapping,
5511 dma_unmap_addr(src_map, mapping));
5512 dest_desc->addr_hi = src_desc->addr_hi;
5513 dest_desc->addr_lo = src_desc->addr_lo;
5515 /* Ensure that the update to the skb happens after the physical
5516 * addresses have been transferred to the new BD location.
5520 src_map->data = NULL;
5523 /* The RX ring scheme is composed of multiple rings which post fresh
5524 * buffers to the chip, and one special ring the chip uses to report
5525 * status back to the host.
5527 * The special ring reports the status of received packets to the
5528 * host. The chip does not write into the original descriptor the
5529 * RX buffer was obtained from. The chip simply takes the original
5530 * descriptor as provided by the host, updates the status and length
5531 * field, then writes this into the next status ring entry.
5533 * Each ring the host uses to post buffers to the chip is described
5534 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5535 * it is first placed into the on-chip RAM. When the packet's length
5536 * is known, it walks down the TG3_BDINFO entries to select the ring.
5537 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5538 * whose MAXLEN covers the new packet's length is chosen.
5540 * The "separate ring for rx status" scheme may sound queer, but it makes
5541 * sense from a cache coherency perspective. If only the host writes
5542 * to the buffer post rings, and only the chip writes to the rx status
5543 * rings, then cache lines never move beyond shared-modified state.
5544 * If both the host and chip were to write into the same ring, cache line
5545 * eviction could occur since both entities want it in an exclusive state.
5547 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5549 struct tg3 *tp = tnapi->tp;
5550 u32 work_mask, rx_std_posted = 0;
5551 u32 std_prod_idx, jmb_prod_idx;
5552 u32 sw_idx = tnapi->rx_rcb_ptr;
5555 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5557 hw_idx = *(tnapi->rx_rcb_prod_idx);
5559 * We need to order the read of hw_idx and the read of
5560 * the opaque cookie.
5565 std_prod_idx = tpr->rx_std_prod_idx;
5566 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5567 while (sw_idx != hw_idx && budget > 0) {
5568 struct ring_info *ri;
5569 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5571 struct sk_buff *skb;
5572 dma_addr_t dma_addr;
5573 u32 opaque_key, desc_idx, *post_ptr;
5576 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5577 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5578 if (opaque_key == RXD_OPAQUE_RING_STD) {
5579 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5580 dma_addr = dma_unmap_addr(ri, mapping);
5582 post_ptr = &std_prod_idx;
5584 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5585 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5586 dma_addr = dma_unmap_addr(ri, mapping);
5588 post_ptr = &jmb_prod_idx;
5590 goto next_pkt_nopost;
5592 work_mask |= opaque_key;
5594 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5595 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5597 tg3_recycle_rx(tnapi, tpr, opaque_key,
5598 desc_idx, *post_ptr);
5600 /* Other statistics are tracked by the card. */
5605 prefetch(data + TG3_RX_OFFSET(tp));
5606 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5609 if (len > TG3_RX_COPY_THRESH(tp)) {
5612 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5617 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5618 PCI_DMA_FROMDEVICE);
5620 skb = build_skb(data);
5623 goto drop_it_no_recycle;
5625 skb_reserve(skb, TG3_RX_OFFSET(tp));
5626 /* Ensure that the update to the data happens
5627 * after the usage of the old DMA mapping.
5634 tg3_recycle_rx(tnapi, tpr, opaque_key,
5635 desc_idx, *post_ptr);
5637 skb = netdev_alloc_skb(tp->dev,
5638 len + TG3_RAW_IP_ALIGN);
5640 goto drop_it_no_recycle;
5642 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5643 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5645 data + TG3_RX_OFFSET(tp),
5647 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5651 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5652 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5653 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5654 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5655 skb->ip_summed = CHECKSUM_UNNECESSARY;
5657 skb_checksum_none_assert(skb);
5659 skb->protocol = eth_type_trans(skb, tp->dev);
5661 if (len > (tp->dev->mtu + ETH_HLEN) &&
5662 skb->protocol != htons(ETH_P_8021Q)) {
5664 goto drop_it_no_recycle;
5667 if (desc->type_flags & RXD_FLAG_VLAN &&
5668 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5669 __vlan_hwaccel_put_tag(skb,
5670 desc->err_vlan & RXD_VLAN_MASK);
5672 napi_gro_receive(&tnapi->napi, skb);
5680 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5681 tpr->rx_std_prod_idx = std_prod_idx &
5682 tp->rx_std_ring_mask;
5683 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5684 tpr->rx_std_prod_idx);
5685 work_mask &= ~RXD_OPAQUE_RING_STD;
5690 sw_idx &= tp->rx_ret_ring_mask;
5692 /* Refresh hw_idx to see if there is new work */
5693 if (sw_idx == hw_idx) {
5694 hw_idx = *(tnapi->rx_rcb_prod_idx);
5699 /* ACK the status ring. */
5700 tnapi->rx_rcb_ptr = sw_idx;
5701 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5703 /* Refill RX ring(s). */
5704 if (!tg3_flag(tp, ENABLE_RSS)) {
5705 if (work_mask & RXD_OPAQUE_RING_STD) {
5706 tpr->rx_std_prod_idx = std_prod_idx &
5707 tp->rx_std_ring_mask;
5708 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5709 tpr->rx_std_prod_idx);
5711 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5712 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5713 tp->rx_jmb_ring_mask;
5714 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5715 tpr->rx_jmb_prod_idx);
5718 } else if (work_mask) {
5719 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5720 * updated before the producer indices can be updated.
5724 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5725 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5727 if (tnapi != &tp->napi[1])
5728 napi_schedule(&tp->napi[1].napi);
5734 static void tg3_poll_link(struct tg3 *tp)
5736 /* handle link change and other phy events */
5737 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5738 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5740 if (sblk->status & SD_STATUS_LINK_CHG) {
5741 sblk->status = SD_STATUS_UPDATED |
5742 (sblk->status & ~SD_STATUS_LINK_CHG);
5743 spin_lock(&tp->lock);
5744 if (tg3_flag(tp, USE_PHYLIB)) {
5746 (MAC_STATUS_SYNC_CHANGED |
5747 MAC_STATUS_CFG_CHANGED |
5748 MAC_STATUS_MI_COMPLETION |
5749 MAC_STATUS_LNKSTATE_CHANGED));
5752 tg3_setup_phy(tp, 0);
5753 spin_unlock(&tp->lock);
5758 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5759 struct tg3_rx_prodring_set *dpr,
5760 struct tg3_rx_prodring_set *spr)
5762 u32 si, di, cpycnt, src_prod_idx;
5766 src_prod_idx = spr->rx_std_prod_idx;
5768 /* Make sure updates to the rx_std_buffers[] entries and the
5769 * standard producer index are seen in the correct order.
5773 if (spr->rx_std_cons_idx == src_prod_idx)
5776 if (spr->rx_std_cons_idx < src_prod_idx)
5777 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5779 cpycnt = tp->rx_std_ring_mask + 1 -
5780 spr->rx_std_cons_idx;
5782 cpycnt = min(cpycnt,
5783 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5785 si = spr->rx_std_cons_idx;
5786 di = dpr->rx_std_prod_idx;
5788 for (i = di; i < di + cpycnt; i++) {
5789 if (dpr->rx_std_buffers[i].data) {
5799 /* Ensure that updates to the rx_std_buffers ring and the
5800 * shadowed hardware producer ring from tg3_recycle_skb() are
5801 * ordered correctly WRT the skb check above.
5805 memcpy(&dpr->rx_std_buffers[di],
5806 &spr->rx_std_buffers[si],
5807 cpycnt * sizeof(struct ring_info));
5809 for (i = 0; i < cpycnt; i++, di++, si++) {
5810 struct tg3_rx_buffer_desc *sbd, *dbd;
5811 sbd = &spr->rx_std[si];
5812 dbd = &dpr->rx_std[di];
5813 dbd->addr_hi = sbd->addr_hi;
5814 dbd->addr_lo = sbd->addr_lo;
5817 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5818 tp->rx_std_ring_mask;
5819 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5820 tp->rx_std_ring_mask;
5824 src_prod_idx = spr->rx_jmb_prod_idx;
5826 /* Make sure updates to the rx_jmb_buffers[] entries and
5827 * the jumbo producer index are seen in the correct order.
5831 if (spr->rx_jmb_cons_idx == src_prod_idx)
5834 if (spr->rx_jmb_cons_idx < src_prod_idx)
5835 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5837 cpycnt = tp->rx_jmb_ring_mask + 1 -
5838 spr->rx_jmb_cons_idx;
5840 cpycnt = min(cpycnt,
5841 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5843 si = spr->rx_jmb_cons_idx;
5844 di = dpr->rx_jmb_prod_idx;
5846 for (i = di; i < di + cpycnt; i++) {
5847 if (dpr->rx_jmb_buffers[i].data) {
5857 /* Ensure that updates to the rx_jmb_buffers ring and the
5858 * shadowed hardware producer ring from tg3_recycle_skb() are
5859 * ordered correctly WRT the skb check above.
5863 memcpy(&dpr->rx_jmb_buffers[di],
5864 &spr->rx_jmb_buffers[si],
5865 cpycnt * sizeof(struct ring_info));
5867 for (i = 0; i < cpycnt; i++, di++, si++) {
5868 struct tg3_rx_buffer_desc *sbd, *dbd;
5869 sbd = &spr->rx_jmb[si].std;
5870 dbd = &dpr->rx_jmb[di].std;
5871 dbd->addr_hi = sbd->addr_hi;
5872 dbd->addr_lo = sbd->addr_lo;
5875 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5876 tp->rx_jmb_ring_mask;
5877 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5878 tp->rx_jmb_ring_mask;
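/* Worked example of the copy-count math in tg3_rx_prodring_xfer() above
 * for the standard ring, assuming a 512-entry ring (rx_std_ring_mask ==
 * 511):
 *
 *	spr->rx_std_cons_idx = 500, src_prod_idx = 10
 *	consumer is ahead of the (wrapped) producer, so
 *	cpycnt = 511 + 1 - 500 = 12, i.e. copy only up to the ring wrap;
 *	cpycnt is then clamped so the destination copy does not wrap either.
 *
 * Whatever remains past the wrap is transferred on a later pass, after
 * both indices have been advanced modulo the ring size.
 */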
5884 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5886 struct tg3 *tp = tnapi->tp;
5888 /* run TX completion thread */
5889 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5891 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5895 /* run RX thread, within the bounds set by NAPI.
5896 * All RX "locking" is done by ensuring outside
5897 * code synchronizes with tg3->napi.poll()
5899 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5900 work_done += tg3_rx(tnapi, budget - work_done);
5902 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5903 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5905 u32 std_prod_idx = dpr->rx_std_prod_idx;
5906 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5908 for (i = 1; i < tp->irq_cnt; i++)
5909 err |= tg3_rx_prodring_xfer(tp, dpr,
5910 &tp->napi[i].prodring);
5914 if (std_prod_idx != dpr->rx_std_prod_idx)
5915 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5916 dpr->rx_std_prod_idx);
5918 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5919 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5920 dpr->rx_jmb_prod_idx);
5925 tw32_f(HOSTCC_MODE, tp->coal_now);
5931 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5933 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5934 schedule_work(&tp->reset_task);
5937 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5939 cancel_work_sync(&tp->reset_task);
5940 tg3_flag_clear(tp, RESET_TASK_PENDING);
5943 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5945 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5946 struct tg3 *tp = tnapi->tp;
5948 struct tg3_hw_status *sblk = tnapi->hw_status;
5951 work_done = tg3_poll_work(tnapi, work_done, budget);
5953 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5956 if (unlikely(work_done >= budget))
5959 /* tp->last_tag is used in tg3_int_reenable() below
5960 * to tell the hw how much work has been processed,
5961 * so we must read it before checking for more work.
5963 tnapi->last_tag = sblk->status_tag;
5964 tnapi->last_irq_tag = tnapi->last_tag;
5967 /* check for RX/TX work to do */
5968 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5969 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5970 napi_complete(napi);
5971 /* Reenable interrupts. */
5972 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5981 /* work_done is guaranteed to be less than budget. */
5982 napi_complete(napi);
5983 tg3_reset_task_schedule(tp);
5987 static void tg3_process_error(struct tg3 *tp)
5990 bool real_error = false;
5992 if (tg3_flag(tp, ERROR_PROCESSED))
5995 /* Check Flow Attention register */
5996 val = tr32(HOSTCC_FLOW_ATTN);
5997 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5998 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6002 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6003 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6007 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6008 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6017 tg3_flag_set(tp, ERROR_PROCESSED);
6018 tg3_reset_task_schedule(tp);
6021 static int tg3_poll(struct napi_struct *napi, int budget)
6023 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6024 struct tg3 *tp = tnapi->tp;
6026 struct tg3_hw_status *sblk = tnapi->hw_status;
6029 if (sblk->status & SD_STATUS_ERROR)
6030 tg3_process_error(tp);
6034 work_done = tg3_poll_work(tnapi, work_done, budget);
6036 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6039 if (unlikely(work_done >= budget))
6042 if (tg3_flag(tp, TAGGED_STATUS)) {
6043 /* tp->last_tag is used in tg3_int_reenable() below
6044 * to tell the hw how much work has been processed,
6045 * so we must read it before checking for more work.
6047 tnapi->last_tag = sblk->status_tag;
6048 tnapi->last_irq_tag = tnapi->last_tag;
6051 sblk->status &= ~SD_STATUS_UPDATED;
6053 if (likely(!tg3_has_work(tnapi))) {
6054 napi_complete(napi);
6055 tg3_int_reenable(tnapi);
6063 /* work_done is guaranteed to be less than budget. */
6064 napi_complete(napi);
6065 tg3_reset_task_schedule(tp);
6069 static void tg3_napi_disable(struct tg3 *tp)
6073 for (i = tp->irq_cnt - 1; i >= 0; i--)
6074 napi_disable(&tp->napi[i].napi);
6077 static void tg3_napi_enable(struct tg3 *tp)
6081 for (i = 0; i < tp->irq_cnt; i++)
6082 napi_enable(&tp->napi[i].napi);
6085 static void tg3_napi_init(struct tg3 *tp)
6089 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6090 for (i = 1; i < tp->irq_cnt; i++)
6091 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6094 static void tg3_napi_fini(struct tg3 *tp)
6098 for (i = 0; i < tp->irq_cnt; i++)
6099 netif_napi_del(&tp->napi[i].napi);
6102 static inline void tg3_netif_stop(struct tg3 *tp)
6104 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6105 tg3_napi_disable(tp);
6106 netif_tx_disable(tp->dev);
6109 static inline void tg3_netif_start(struct tg3 *tp)
6111 /* NOTE: unconditional netif_tx_wake_all_queues is only
6112 * appropriate so long as all callers are assured to
6113 * have free tx slots (such as after tg3_init_hw)
6115 netif_tx_wake_all_queues(tp->dev);
6117 tg3_napi_enable(tp);
6118 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6119 tg3_enable_ints(tp);
6122 static void tg3_irq_quiesce(struct tg3 *tp)
6126 BUG_ON(tp->irq_sync);
6131 for (i = 0; i < tp->irq_cnt; i++)
6132 synchronize_irq(tp->napi[i].irq_vec);
6135 /* Fully shut down all tg3 driver activity elsewhere in the system.
6136 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6137 * with as well. Most of the time, this is not necessary except when
6138 * shutting down the device.
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6142 spin_lock_bh(&tp->lock);
6144 tg3_irq_quiesce(tp);
6147 static inline void tg3_full_unlock(struct tg3 *tp)
6149 spin_unlock_bh(&tp->lock);
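/* Typical usage sketch of the locking helpers above, mirroring the real
 * call sites such as tg3_reset_task() and tg3_change_mtu() later in this
 * file (illustrative only):
 *
 *	tg3_full_lock(tp, 1);		// irq_sync != 0 also quiesces IRQs
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *	err = tg3_init_hw(tp, 1);
 *	if (!err)
 *		tg3_netif_start(tp);
 *	tg3_full_unlock(tp);
 *
 * Pass irq_sync == 0 for plain reconfiguration under tp->lock; pass a
 * non-zero value when the hardware is about to be reset, so that in-flight
 * interrupt handlers have finished first.
 */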
6152 /* One-shot MSI handler - Chip automatically disables interrupt
6153 * after sending MSI so driver doesn't have to do it.
6155 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6157 struct tg3_napi *tnapi = dev_id;
6158 struct tg3 *tp = tnapi->tp;
6160 prefetch(tnapi->hw_status);
6162 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6164 if (likely(!tg3_irq_sync(tp)))
6165 napi_schedule(&tnapi->napi);
6170 /* MSI ISR - No need to check for interrupt sharing and no need to
6171 * flush status block and interrupt mailbox. PCI ordering rules
6172 * guarantee that MSI will arrive after the status block.
6174 static irqreturn_t tg3_msi(int irq, void *dev_id)
6176 struct tg3_napi *tnapi = dev_id;
6177 struct tg3 *tp = tnapi->tp;
6179 prefetch(tnapi->hw_status);
6181 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6183 * Writing any value to intr-mbox-0 clears PCI INTA# and
6184 * chip-internal interrupt pending events.
6185 * Writing non-zero to intr-mbox-0 additionally tells the
6186 * NIC to stop sending us irqs, engaging "in-intr-handler"
6189 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6190 if (likely(!tg3_irq_sync(tp)))
6191 napi_schedule(&tnapi->napi);
6193 return IRQ_RETVAL(1);
6196 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6198 struct tg3_napi *tnapi = dev_id;
6199 struct tg3 *tp = tnapi->tp;
6200 struct tg3_hw_status *sblk = tnapi->hw_status;
6201 unsigned int handled = 1;
6203 /* In INTx mode, it is possible for the interrupt to arrive at
6204 * the CPU before the status block posted prior to the interrupt.
6205 * Reading the PCI State register will confirm whether the
6206 * interrupt is ours and will flush the status block.
6208 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6209 if (tg3_flag(tp, CHIP_RESETTING) ||
6210 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6217 * Writing any value to intr-mbox-0 clears PCI INTA# and
6218 * chip-internal interrupt pending events.
6219 * Writing non-zero to intr-mbox-0 additionally tells the
6220 * NIC to stop sending us irqs, engaging "in-intr-handler"
6223 * Flush the mailbox to de-assert the IRQ immediately to prevent
6224 * spurious interrupts. The flush impacts performance but
6225 * excessive spurious interrupts can be worse in some cases.
6227 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6228 if (tg3_irq_sync(tp))
6230 sblk->status &= ~SD_STATUS_UPDATED;
6231 if (likely(tg3_has_work(tnapi))) {
6232 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6233 napi_schedule(&tnapi->napi);
6235 /* No work, shared interrupt perhaps? re-enable
6236 * interrupts, and flush that PCI write
6238 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6242 return IRQ_RETVAL(handled);
6245 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6247 struct tg3_napi *tnapi = dev_id;
6248 struct tg3 *tp = tnapi->tp;
6249 struct tg3_hw_status *sblk = tnapi->hw_status;
6250 unsigned int handled = 1;
6252 /* In INTx mode, it is possible for the interrupt to arrive at
6253 * the CPU before the status block posted prior to the interrupt.
6254 * Reading the PCI State register will confirm whether the
6255 * interrupt is ours and will flush the status block.
6257 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6258 if (tg3_flag(tp, CHIP_RESETTING) ||
6259 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6266 * writing any value to intr-mbox-0 clears PCI INTA# and
6267 * chip-internal interrupt pending events.
6268 * writing non-zero to intr-mbox-0 additionally tells the
6269 * NIC to stop sending us irqs, engaging "in-intr-handler"
6272 * Flush the mailbox to de-assert the IRQ immediately to prevent
6273 * spurious interrupts. The flush impacts performance but
6274 * excessive spurious interrupts can be worse in some cases.
6276 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6279 * In a shared interrupt configuration, sometimes other devices'
6280 * interrupts will scream. We record the current status tag here
6281 * so that the above check can report that the screaming interrupts
6282 * are unhandled. Eventually they will be silenced.
6284 tnapi->last_irq_tag = sblk->status_tag;
6286 if (tg3_irq_sync(tp))
6289 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6291 napi_schedule(&tnapi->napi);
6294 return IRQ_RETVAL(handled);
6297 /* ISR for interrupt test */
6298 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6300 struct tg3_napi *tnapi = dev_id;
6301 struct tg3 *tp = tnapi->tp;
6302 struct tg3_hw_status *sblk = tnapi->hw_status;
6304 if ((sblk->status & SD_STATUS_UPDATED) ||
6305 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6306 tg3_disable_ints(tp);
6307 return IRQ_RETVAL(1);
6309 return IRQ_RETVAL(0);
6312 static int tg3_init_hw(struct tg3 *, int);
6313 static int tg3_halt(struct tg3 *, int, int);
6315 /* Restart hardware after configuration changes, self-test, etc.
6316 * Invoked with tp->lock held.
6318 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6319 __releases(tp->lock)
6320 __acquires(tp->lock)
6324 err = tg3_init_hw(tp, reset_phy);
6327 "Failed to re-initialize device, aborting\n");
6328 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6329 tg3_full_unlock(tp);
6330 del_timer_sync(&tp->timer);
6332 tg3_napi_enable(tp);
6334 tg3_full_lock(tp, 0);
6339 #ifdef CONFIG_NET_POLL_CONTROLLER
6340 static void tg3_poll_controller(struct net_device *dev)
6343 struct tg3 *tp = netdev_priv(dev);
6345 for (i = 0; i < tp->irq_cnt; i++)
6346 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6350 static void tg3_reset_task(struct work_struct *work)
6352 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6355 tg3_full_lock(tp, 0);
6357 if (!netif_running(tp->dev)) {
6358 tg3_flag_clear(tp, RESET_TASK_PENDING);
6359 tg3_full_unlock(tp);
6363 tg3_full_unlock(tp);
6369 tg3_full_lock(tp, 1);
6371 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6372 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6373 tp->write32_rx_mbox = tg3_write_flush_reg32;
6374 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6375 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6378 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6379 err = tg3_init_hw(tp, 1);
6383 tg3_netif_start(tp);
6386 tg3_full_unlock(tp);
6391 tg3_flag_clear(tp, RESET_TASK_PENDING);
6394 static void tg3_tx_timeout(struct net_device *dev)
6396 struct tg3 *tp = netdev_priv(dev);
6398 if (netif_msg_tx_err(tp)) {
6399 netdev_err(dev, "transmit timed out, resetting\n");
6403 tg3_reset_task_schedule(tp);
6406 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6407 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6409 u32 base = (u32) mapping & 0xffffffff;
6411 return (base > 0xffffdcc0) && (base + len + 8 < base);
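/* Worked example of the 4GB test above: with mapping = 0xfffff000 and
 * len = 0x2000, base = 0xfffff000 exceeds 0xffffdcc0, and base + len + 8
 * wraps to 0x1008 in 32-bit arithmetic, which is less than base, so the
 * buffer straddles a 4GB boundary and the caller takes the workaround
 * path.  0xffffdcc0 sits 9024 bytes below the boundary, roughly one
 * jumbo frame plus slack, so buffers that start any lower can never
 * cross it.
 */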
6414 /* Test for DMA addresses > 40-bit */
6415 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6418 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6419 if (tg3_flag(tp, 40BIT_DMA_BUG))
6420 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6427 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6428 dma_addr_t mapping, u32 len, u32 flags,
6431 txbd->addr_hi = ((u64) mapping >> 32);
6432 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6433 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6434 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6437 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6438 dma_addr_t map, u32 len, u32 flags,
6441 struct tg3 *tp = tnapi->tp;
6444 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6447 if (tg3_4g_overflow_test(map, len))
6450 if (tg3_40bit_overflow_test(tp, map, len))
6453 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6454 u32 prvidx = *entry;
6455 u32 tmp_flag = flags & ~TXD_FLAG_END;
6456 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6457 u32 frag_len = TG3_TX_BD_DMA_MAX;
6458 len -= TG3_TX_BD_DMA_MAX;
6460 /* Avoid the 8-byte DMA problem */
6462 len += TG3_TX_BD_DMA_MAX / 2;
6463 frag_len = TG3_TX_BD_DMA_MAX / 2;
6466 tnapi->tx_buffers[*entry].fragmented = true;
6468 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6469 frag_len, tmp_flag, mss, vlan);
6472 *entry = NEXT_TX(*entry);
6479 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6480 len, flags, mss, vlan);
6482 *entry = NEXT_TX(*entry);
6485 tnapi->tx_buffers[prvidx].fragmented = false;
6489 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6490 len, flags, mss, vlan);
6491 *entry = NEXT_TX(*entry);
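/* Worked example of the 4K_FIFO_LIMIT splitting above, assuming
 * TG3_TX_BD_DMA_MAX is 4096: a 4100-byte mapping would leave a 4-byte
 * tail after one full-sized BD, so the loop rebalances it as
 *
 *	first BD:  4096 / 2 = 2048 bytes	(fragmented, END flag clear)
 *	final BD:  4100 - 2048 = 2052 bytes
 *
 * keeping every DMA segment comfortably larger than 8 bytes.
 */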
6497 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6500 struct sk_buff *skb;
6501 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6506 pci_unmap_single(tnapi->tp->pdev,
6507 dma_unmap_addr(txb, mapping),
6511 while (txb->fragmented) {
6512 txb->fragmented = false;
6513 entry = NEXT_TX(entry);
6514 txb = &tnapi->tx_buffers[entry];
6517 for (i = 0; i <= last; i++) {
6518 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6520 entry = NEXT_TX(entry);
6521 txb = &tnapi->tx_buffers[entry];
6523 pci_unmap_page(tnapi->tp->pdev,
6524 dma_unmap_addr(txb, mapping),
6525 skb_frag_size(frag), PCI_DMA_TODEVICE);
6527 while (txb->fragmented) {
6528 txb->fragmented = false;
6529 entry = NEXT_TX(entry);
6530 txb = &tnapi->tx_buffers[entry];
6535 /* Work around 4GB and 40-bit hardware DMA bugs. */
6536 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6537 struct sk_buff **pskb,
6538 u32 *entry, u32 *budget,
6539 u32 base_flags, u32 mss, u32 vlan)
6541 struct tg3 *tp = tnapi->tp;
6542 struct sk_buff *new_skb, *skb = *pskb;
6543 dma_addr_t new_addr = 0;
6546 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6547 new_skb = skb_copy(skb, GFP_ATOMIC);
6549 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6551 new_skb = skb_copy_expand(skb,
6552 skb_headroom(skb) + more_headroom,
6553 skb_tailroom(skb), GFP_ATOMIC);
6559 /* New SKB is guaranteed to be linear. */
6560 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6562 /* Make sure the mapping succeeded */
6563 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6564 dev_kfree_skb(new_skb);
6567 u32 save_entry = *entry;
6569 base_flags |= TXD_FLAG_END;
6571 tnapi->tx_buffers[*entry].skb = new_skb;
6572 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6575 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6576 new_skb->len, base_flags,
6578 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6579 dev_kfree_skb(new_skb);
6590 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6592 /* Use GSO to work around a rare TSO bug that may be triggered when the
6593 * TSO header is greater than 80 bytes.
6595 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6597 struct sk_buff *segs, *nskb;
6598 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6600 /* Estimate the number of fragments in the worst case */
6601 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6602 netif_stop_queue(tp->dev);
6604 /* netif_tx_stop_queue() must be done before checking
6605 * tx index in tg3_tx_avail() below, because in
6606 * tg3_tx(), we update tx index before checking for
6607 * netif_tx_queue_stopped().
6610 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6611 return NETDEV_TX_BUSY;
6613 netif_wake_queue(tp->dev);
6616 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6618 goto tg3_tso_bug_end;
6624 tg3_start_xmit(nskb, tp->dev);
6630 return NETDEV_TX_OK;
6633 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6634 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6636 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6638 struct tg3 *tp = netdev_priv(dev);
6639 u32 len, entry, base_flags, mss, vlan = 0;
6641 int i = -1, would_hit_hwbug;
6643 struct tg3_napi *tnapi;
6644 struct netdev_queue *txq;
6647 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6648 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6649 if (tg3_flag(tp, ENABLE_TSS))
6652 budget = tg3_tx_avail(tnapi);
6654 /* We are running in BH disabled context with netif_tx_lock
6655 * and TX reclaim runs via tp->napi.poll inside of a software
6656 * interrupt. Furthermore, IRQ processing runs lockless so we have
6657 * no IRQ context deadlocks to worry about either. Rejoice!
6659 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6660 if (!netif_tx_queue_stopped(txq)) {
6661 netif_tx_stop_queue(txq);
6663 /* This is a hard error, log it. */
6665 "BUG! Tx Ring full when queue awake!\n");
6667 return NETDEV_TX_BUSY;
6670 entry = tnapi->tx_prod;
6672 if (skb->ip_summed == CHECKSUM_PARTIAL)
6673 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6675 mss = skb_shinfo(skb)->gso_size;
6678 u32 tcp_opt_len, hdr_len;
6680 if (skb_header_cloned(skb) &&
6681 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6685 tcp_opt_len = tcp_optlen(skb);
6687 if (skb_is_gso_v6(skb)) {
6688 hdr_len = skb_headlen(skb) - ETH_HLEN;
6692 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6693 hdr_len = ip_tcp_len + tcp_opt_len;
6696 iph->tot_len = htons(mss + hdr_len);
6699 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6700 tg3_flag(tp, TSO_BUG))
6701 return tg3_tso_bug(tp, skb);
6703 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6704 TXD_FLAG_CPU_POST_DMA);
6706 if (tg3_flag(tp, HW_TSO_1) ||
6707 tg3_flag(tp, HW_TSO_2) ||
6708 tg3_flag(tp, HW_TSO_3)) {
6709 tcp_hdr(skb)->check = 0;
6710 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6712 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6717 if (tg3_flag(tp, HW_TSO_3)) {
6718 mss |= (hdr_len & 0xc) << 12;
6720 base_flags |= 0x00000010;
6721 base_flags |= (hdr_len & 0x3e0) << 5;
6722 } else if (tg3_flag(tp, HW_TSO_2))
6723 mss |= hdr_len << 9;
6724 else if (tg3_flag(tp, HW_TSO_1) ||
6725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6726 if (tcp_opt_len || iph->ihl > 5) {
6729 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6730 mss |= (tsflags << 11);
6733 if (tcp_opt_len || iph->ihl > 5) {
6736 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6737 base_flags |= tsflags << 12;
6742 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6743 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6744 base_flags |= TXD_FLAG_JMB_PKT;
6746 if (vlan_tx_tag_present(skb)) {
6747 base_flags |= TXD_FLAG_VLAN;
6748 vlan = vlan_tx_tag_get(skb);
6751 len = skb_headlen(skb);
6753 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6754 if (pci_dma_mapping_error(tp->pdev, mapping))
6758 tnapi->tx_buffers[entry].skb = skb;
6759 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6761 would_hit_hwbug = 0;
6763 if (tg3_flag(tp, 5701_DMA_BUG))
6764 would_hit_hwbug = 1;
6766 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6767 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6769 would_hit_hwbug = 1;
6770 /* Now loop through additional data fragments, and queue them. */
6771 } else if (skb_shinfo(skb)->nr_frags > 0) {
6774 if (!tg3_flag(tp, HW_TSO_1) &&
6775 !tg3_flag(tp, HW_TSO_2) &&
6776 !tg3_flag(tp, HW_TSO_3))
6779 last = skb_shinfo(skb)->nr_frags - 1;
6780 for (i = 0; i <= last; i++) {
6781 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6783 len = skb_frag_size(frag);
6784 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6785 len, DMA_TO_DEVICE);
6787 tnapi->tx_buffers[entry].skb = NULL;
6788 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6790 if (dma_mapping_error(&tp->pdev->dev, mapping))
6794 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6796 ((i == last) ? TXD_FLAG_END : 0),
6798 would_hit_hwbug = 1;
6804 if (would_hit_hwbug) {
6805 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6807 /* If the workaround fails due to memory/mapping
6808 * failure, silently drop this packet.
6810 entry = tnapi->tx_prod;
6811 budget = tg3_tx_avail(tnapi);
6812 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6813 base_flags, mss, vlan))
6817 skb_tx_timestamp(skb);
6818 netdev_sent_queue(tp->dev, skb->len);
6820 /* Packets are ready, update Tx producer idx locally and on the card. */
6821 tw32_tx_mbox(tnapi->prodmbox, entry);
6823 tnapi->tx_prod = entry;
6824 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6825 netif_tx_stop_queue(txq);
6827 /* netif_tx_stop_queue() must be done before checking
6828 * tx index in tg3_tx_avail() below, because in
6829 * tg3_tx(), we update tx index before checking for
6830 * netif_tx_queue_stopped().
6833 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6834 netif_tx_wake_queue(txq);
6838 return NETDEV_TX_OK;
6841 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6842 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6847 return NETDEV_TX_OK;
6850 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6853 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6854 MAC_MODE_PORT_MODE_MASK);
6856 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6858 if (!tg3_flag(tp, 5705_PLUS))
6859 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6861 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6862 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6864 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6866 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6868 if (tg3_flag(tp, 5705_PLUS) ||
6869 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6871 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6874 tw32(MAC_MODE, tp->mac_mode);
6878 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6880 u32 val, bmcr, mac_mode, ptest = 0;
6882 tg3_phy_toggle_apd(tp, false);
6883 tg3_phy_toggle_automdix(tp, 0);
6885 if (extlpbk && tg3_phy_set_extloopbk(tp))
6888 bmcr = BMCR_FULLDPLX;
6893 bmcr |= BMCR_SPEED100;
6897 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6899 bmcr |= BMCR_SPEED100;
6902 bmcr |= BMCR_SPEED1000;
6907 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6908 tg3_readphy(tp, MII_CTRL1000, &val);
6909 val |= CTL1000_AS_MASTER |
6910 CTL1000_ENABLE_MASTER;
6911 tg3_writephy(tp, MII_CTRL1000, val);
6913 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6914 MII_TG3_FET_PTEST_TRIM_2;
6915 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6918 bmcr |= BMCR_LOOPBACK;
6920 tg3_writephy(tp, MII_BMCR, bmcr);
6922 /* The write needs to be flushed for the FETs */
6923 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6924 tg3_readphy(tp, MII_BMCR, &bmcr);
6928 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6930 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6931 MII_TG3_FET_PTEST_FRC_TX_LINK |
6932 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6934 /* The write needs to be flushed for the AC131 */
6935 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6938 /* Reset to prevent losing 1st rx packet intermittently */
6939 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6940 tg3_flag(tp, 5780_CLASS)) {
6941 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6943 tw32_f(MAC_RX_MODE, tp->rx_mode);
6946 mac_mode = tp->mac_mode &
6947 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6948 if (speed == SPEED_1000)
6949 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6951 mac_mode |= MAC_MODE_PORT_MODE_MII;
6953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6954 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6956 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6957 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6958 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6959 mac_mode |= MAC_MODE_LINK_POLARITY;
6961 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6962 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6965 tw32(MAC_MODE, mac_mode);
6971 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6973 struct tg3 *tp = netdev_priv(dev);
6975 if (features & NETIF_F_LOOPBACK) {
6976 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6979 spin_lock_bh(&tp->lock);
6980 tg3_mac_loopback(tp, true);
6981 netif_carrier_on(tp->dev);
6982 spin_unlock_bh(&tp->lock);
6983 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6985 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6988 spin_lock_bh(&tp->lock);
6989 tg3_mac_loopback(tp, false);
6990 /* Force link status check */
6991 tg3_setup_phy(tp, 1);
6992 spin_unlock_bh(&tp->lock);
6993 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6997 static netdev_features_t tg3_fix_features(struct net_device *dev,
6998 netdev_features_t features)
7000 struct tg3 *tp = netdev_priv(dev);
7002 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7003 features &= ~NETIF_F_ALL_TSO;
7008 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7010 netdev_features_t changed = dev->features ^ features;
7012 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7013 tg3_set_loopback(dev, features);
7018 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7023 if (new_mtu > ETH_DATA_LEN) {
7024 if (tg3_flag(tp, 5780_CLASS)) {
7025 netdev_update_features(dev);
7026 tg3_flag_clear(tp, TSO_CAPABLE);
7028 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7031 if (tg3_flag(tp, 5780_CLASS)) {
7032 tg3_flag_set(tp, TSO_CAPABLE);
7033 netdev_update_features(dev);
7035 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
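/* Changing the MTU on a running interface requires a full halt and
 * restart of the hardware so that the new ring parameters take effect.
 */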
7039 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7041 struct tg3 *tp = netdev_priv(dev);
7044 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7047 if (!netif_running(dev)) {
7048 /* We'll just catch it later when the
 * device is up'd.
 */
7051 tg3_set_mtu(dev, tp, new_mtu);
7059 tg3_full_lock(tp, 1);
7061 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7063 tg3_set_mtu(dev, tp, new_mtu);
7065 err = tg3_restart_hw(tp, 0);
7068 tg3_netif_start(tp);
7070 tg3_full_unlock(tp);
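/* Release all data buffers currently posted to a producer ring set.
 * For the per-vector rings only the span between the consumer and
 * producer indexes is walked; the primary ring is freed in full.
 */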
7078 static void tg3_rx_prodring_free(struct tg3 *tp,
7079 struct tg3_rx_prodring_set *tpr)
7083 if (tpr != &tp->napi[0].prodring) {
7084 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7085 i = (i + 1) & tp->rx_std_ring_mask)
7086 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7089 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7090 for (i = tpr->rx_jmb_cons_idx;
7091 i != tpr->rx_jmb_prod_idx;
7092 i = (i + 1) & tp->rx_jmb_ring_mask) {
7093 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7101 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7102 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7105 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7106 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7107 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7112 /* Initialize rx rings for packet processing.
7114 * The chip has been shut down and the driver detached from
7115 * the networking, so no interrupts or new tx packets will
7116 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
7119 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7120 struct tg3_rx_prodring_set *tpr)
7122 u32 i, rx_pkt_dma_sz;
7124 tpr->rx_std_cons_idx = 0;
7125 tpr->rx_std_prod_idx = 0;
7126 tpr->rx_jmb_cons_idx = 0;
7127 tpr->rx_jmb_prod_idx = 0;
7129 if (tpr != &tp->napi[0].prodring) {
7130 memset(&tpr->rx_std_buffers[0], 0,
7131 TG3_RX_STD_BUFF_RING_SIZE(tp));
7132 if (tpr->rx_jmb_buffers)
7133 memset(&tpr->rx_jmb_buffers[0], 0,
7134 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7138 /* Zero out all descriptors. */
7139 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7141 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7142 if (tg3_flag(tp, 5780_CLASS) &&
7143 tp->dev->mtu > ETH_DATA_LEN)
7144 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7145 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7147 /* Initialize invariants of the rings; we only set this
7148 * stuff once. This works because the card does not
7149 * write into the rx buffer posting rings.
7151 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7152 struct tg3_rx_buffer_desc *rxd;
7154 rxd = &tpr->rx_std[i];
7155 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7156 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7157 rxd->opaque = (RXD_OPAQUE_RING_STD |
7158 (i << RXD_OPAQUE_INDEX_SHIFT));
7161 /* Now allocate fresh SKBs for each rx ring. */
7162 for (i = 0; i < tp->rx_pending; i++) {
7163 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7164 netdev_warn(tp->dev,
7165 "Using a smaller RX standard ring. Only "
7166 "%d out of %d buffers were allocated "
7167 "successfully\n", i, tp->rx_pending);
7175 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7178 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7180 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7183 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7184 struct tg3_rx_buffer_desc *rxd;
7186 rxd = &tpr->rx_jmb[i].std;
7187 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7188 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7190 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7191 (i << RXD_OPAQUE_INDEX_SHIFT));
7194 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7195 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7196 netdev_warn(tp->dev,
7197 "Using a smaller RX jumbo ring. Only %d "
7198 "out of %d buffers were allocated "
7199 "successfully\n", i, tp->rx_jumbo_pending);
7202 tp->rx_jumbo_pending = i;
7211 tg3_rx_prodring_free(tp, tpr);
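/* Free the buffer bookkeeping arrays and the coherent DMA rings that
 * back a producer ring set.
 */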
7215 static void tg3_rx_prodring_fini(struct tg3 *tp,
7216 struct tg3_rx_prodring_set *tpr)
7218 kfree(tpr->rx_std_buffers);
7219 tpr->rx_std_buffers = NULL;
7220 kfree(tpr->rx_jmb_buffers);
7221 tpr->rx_jmb_buffers = NULL;
7223 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7224 tpr->rx_std, tpr->rx_std_mapping);
7228 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7229 tpr->rx_jmb, tpr->rx_jmb_mapping);
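/* Allocate the buffer bookkeeping arrays and the coherent DMA rings
 * for a producer ring set; tg3_rx_prodring_fini() cleans up on error.
 */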
7234 static int tg3_rx_prodring_init(struct tg3 *tp,
7235 struct tg3_rx_prodring_set *tpr)
7237 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7239 if (!tpr->rx_std_buffers)
7242 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7243 TG3_RX_STD_RING_BYTES(tp),
7244 &tpr->rx_std_mapping,
7249 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7250 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7252 if (!tpr->rx_jmb_buffers)
7255 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7256 TG3_RX_JMB_RING_BYTES(tp),
7257 &tpr->rx_jmb_mapping,
7266 tg3_rx_prodring_fini(tp, tpr);
7270 /* Free up pending packets in all rx/tx rings.
7272 * The chip has been shut down and the driver detached from
7273 * the networking, so no interrupts or new tx packets will
7274 * end up in the driver. tp->{tx,}lock is not held and we are not
7275 * in an interrupt context and thus may sleep.
7277 static void tg3_free_rings(struct tg3 *tp)
7281 for (j = 0; j < tp->irq_cnt; j++) {
7282 struct tg3_napi *tnapi = &tp->napi[j];
7284 tg3_rx_prodring_free(tp, &tnapi->prodring);
7286 if (!tnapi->tx_buffers)
7289 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7290 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7295 tg3_tx_skb_unmap(tnapi, i,
7296 skb_shinfo(skb)->nr_frags - 1);
7298 dev_kfree_skb_any(skb);
7301 netdev_reset_queue(tp->dev);
7304 /* Initialize tx/rx rings for packet processing.
7306 * The chip has been shut down and the driver detached from
7307 * the networking, so no interrupts or new tx packets will
7308 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
7311 static int tg3_init_rings(struct tg3 *tp)
7315 /* Free up all the SKBs. */
7318 for (i = 0; i < tp->irq_cnt; i++) {
7319 struct tg3_napi *tnapi = &tp->napi[i];
7321 tnapi->last_tag = 0;
7322 tnapi->last_irq_tag = 0;
7323 tnapi->hw_status->status = 0;
7324 tnapi->hw_status->status_tag = 0;
7325 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7330 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7332 tnapi->rx_rcb_ptr = 0;
7334 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7336 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7346 * Must not be invoked with interrupt sources disabled and
7347 * the hardware shut down.
 */
7349 static void tg3_free_consistent(struct tg3 *tp)
7353 for (i = 0; i < tp->irq_cnt; i++) {
7354 struct tg3_napi *tnapi = &tp->napi[i];
7356 if (tnapi->tx_ring) {
7357 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7358 tnapi->tx_ring, tnapi->tx_desc_mapping);
7359 tnapi->tx_ring = NULL;
7362 kfree(tnapi->tx_buffers);
7363 tnapi->tx_buffers = NULL;
7365 if (tnapi->rx_rcb) {
7366 dma_free_coherent(&tp->pdev->dev,
7367 TG3_RX_RCB_RING_BYTES(tp),
7369 tnapi->rx_rcb_mapping);
7370 tnapi->rx_rcb = NULL;
7373 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7375 if (tnapi->hw_status) {
7376 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7378 tnapi->status_mapping);
7379 tnapi->hw_status = NULL;
7384 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7385 tp->hw_stats, tp->stats_mapping);
7386 tp->hw_stats = NULL;
7391 * Must not be invoked with interrupt sources disabled and
7392 * the hardware shut down. Can sleep.
 */
7394 static int tg3_alloc_consistent(struct tg3 *tp)
7398 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7399 sizeof(struct tg3_hw_stats),
7405 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7407 for (i = 0; i < tp->irq_cnt; i++) {
7408 struct tg3_napi *tnapi = &tp->napi[i];
7409 struct tg3_hw_status *sblk;
7411 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7413 &tnapi->status_mapping,
7415 if (!tnapi->hw_status)
7418 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7419 sblk = tnapi->hw_status;
7421 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7424 /* If multivector TSS is enabled, vector 0 does not handle
7425 * tx interrupts. Don't allocate any resources for it.
7427 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7428 (i && tg3_flag(tp, ENABLE_TSS))) {
7429 tnapi->tx_buffers = kzalloc(
7430 sizeof(struct tg3_tx_ring_info) *
7431 TG3_TX_RING_SIZE, GFP_KERNEL);
7432 if (!tnapi->tx_buffers)
7435 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7437 &tnapi->tx_desc_mapping,
7439 if (!tnapi->tx_ring)
7444 * When RSS is enabled, the status block format changes
7445 * slightly. The "rx_jumbo_consumer", "reserved",
7446 * and "rx_mini_consumer" members get mapped to the
7447 * other three rx return ring producer indexes.
7451 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7454 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7457 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7460 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7465 * If multivector RSS is enabled, vector 0 does not handle
7466 * rx or tx interrupts. Don't allocate any resources for it.
7468 if (!i && tg3_flag(tp, ENABLE_RSS))
7471 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7472 TG3_RX_RCB_RING_BYTES(tp),
7473 &tnapi->rx_rcb_mapping,
7478 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7484 tg3_free_consistent(tp);
7488 #define MAX_WAIT_CNT 1000
7490 /* To stop a block, clear the enable bit and poll till it
7491 * clears. tp->lock is held.
7493 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7498 if (tg3_flag(tp, 5705_PLUS)) {
7505 /* We can't enable/disable these bits of the
7506 * 5705/5750, just say success.
7519 for (i = 0; i < MAX_WAIT_CNT; i++) {
7522 if ((val & enable_bit) == 0)
7526 if (i == MAX_WAIT_CNT && !silent) {
7527 dev_err(&tp->pdev->dev,
7528 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7536 /* tp->lock is held. */
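/* Quiesce the chip: disable interrupts, stop the receive and transmit
 * DMA engines and state machines, and clear the host status blocks.
 */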
7537 static int tg3_abort_hw(struct tg3 *tp, int silent)
7541 tg3_disable_ints(tp);
7543 tp->rx_mode &= ~RX_MODE_ENABLE;
7544 tw32_f(MAC_RX_MODE, tp->rx_mode);
7547 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7548 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7549 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7550 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7551 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7552 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7554 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7555 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7556 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7557 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7558 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7559 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7560 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7562 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7563 tw32_f(MAC_MODE, tp->mac_mode);
7566 tp->tx_mode &= ~TX_MODE_ENABLE;
7567 tw32_f(MAC_TX_MODE, tp->tx_mode);
7569 for (i = 0; i < MAX_WAIT_CNT; i++) {
7571 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7574 if (i >= MAX_WAIT_CNT) {
7575 dev_err(&tp->pdev->dev,
7576 "%s timed out, TX_MODE_ENABLE will not clear "
7577 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7581 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7582 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7583 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7585 tw32(FTQ_RESET, 0xffffffff);
7586 tw32(FTQ_RESET, 0x00000000);
7588 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7589 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7591 for (i = 0; i < tp->irq_cnt; i++) {
7592 struct tg3_napi *tnapi = &tp->napi[i];
7593 if (tnapi->hw_status)
7594 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7600 /* Save PCI command register before chip reset */
7601 static void tg3_save_pci_state(struct tg3 *tp)
7603 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7606 /* Restore PCI state after chip reset */
7607 static void tg3_restore_pci_state(struct tg3 *tp)
7611 /* Re-enable indirect register accesses. */
7612 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7613 tp->misc_host_ctrl);
7615 /* Set MAX PCI retry to zero. */
7616 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7617 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7618 tg3_flag(tp, PCIX_MODE))
7619 val |= PCISTATE_RETRY_SAME_DMA;
7620 /* Allow reads and writes to the APE register and memory space. */
7621 if (tg3_flag(tp, ENABLE_APE))
7622 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7623 PCISTATE_ALLOW_APE_SHMEM_WR |
7624 PCISTATE_ALLOW_APE_PSPACE_WR;
7625 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7627 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7629 if (!tg3_flag(tp, PCI_EXPRESS)) {
7630 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7631 tp->pci_cacheline_sz);
7632 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7636 /* Make sure PCI-X relaxed ordering bit is clear. */
7637 if (tg3_flag(tp, PCIX_MODE)) {
7640 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7642 pcix_cmd &= ~PCI_X_CMD_ERO;
7643 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7647 if (tg3_flag(tp, 5780_CLASS)) {
7649 /* Chip reset on 5780 will reset MSI enable bit,
7650 * so we need to restore it.
 */
7652 if (tg3_flag(tp, USING_MSI)) {
7655 pci_read_config_word(tp->pdev,
7656 tp->msi_cap + PCI_MSI_FLAGS,
7658 pci_write_config_word(tp->pdev,
7659 tp->msi_cap + PCI_MSI_FLAGS,
7660 ctrl | PCI_MSI_FLAGS_ENABLE);
7661 val = tr32(MSGINT_MODE);
7662 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7667 /* tp->lock is held. */
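/* Perform a full GRC core-clock reset of the chip.  PCI state is
 * saved beforehand and restored afterwards because the reset clears
 * the memory enable and MSI enable bits on some devices.
 */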
7668 static int tg3_chip_reset(struct tg3 *tp)
7671 void (*write_op)(struct tg3 *, u32, u32);
7676 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7678 /* No matching tg3_nvram_unlock() after this because
7679 * chip reset below will undo the nvram lock.
7681 tp->nvram_lock_cnt = 0;
7683 /* GRC_MISC_CFG core clock reset will clear the memory
7684 * enable bit in PCI register 4 and the MSI enable bit
7685 * on some chips, so we save relevant registers here.
7687 tg3_save_pci_state(tp);
7689 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7690 tg3_flag(tp, 5755_PLUS))
7691 tw32(GRC_FASTBOOT_PC, 0);
7694 * We must avoid the readl() that normally takes place.
7695 * It locks machines, causes machine checks, and other
7696 * fun things. So, temporarily disable the 5701
7697 * hardware workaround, while we do the reset.
7699 write_op = tp->write32;
7700 if (write_op == tg3_write_flush_reg32)
7701 tp->write32 = tg3_write32;
7703 /* Prevent the irq handler from reading or writing PCI registers
7704 * during chip reset when the memory enable bit in the PCI command
7705 * register may be cleared. The chip does not generate interrupts
7706 * at this time, but the irq handler may still be called due to irq
7707 * sharing or irqpoll.
7709 tg3_flag_set(tp, CHIP_RESETTING);
7710 for (i = 0; i < tp->irq_cnt; i++) {
7711 struct tg3_napi *tnapi = &tp->napi[i];
7712 if (tnapi->hw_status) {
7713 tnapi->hw_status->status = 0;
7714 tnapi->hw_status->status_tag = 0;
7716 tnapi->last_tag = 0;
7717 tnapi->last_irq_tag = 0;
7721 for (i = 0; i < tp->irq_cnt; i++)
7722 synchronize_irq(tp->napi[i].irq_vec);
7724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7725 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7726 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7730 val = GRC_MISC_CFG_CORECLK_RESET;
7732 if (tg3_flag(tp, PCI_EXPRESS)) {
7733 /* Force PCIe 1.0a mode */
7734 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7735 !tg3_flag(tp, 57765_PLUS) &&
7736 tr32(TG3_PCIE_PHY_TSTCTL) ==
7737 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7738 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7740 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7741 tw32(GRC_MISC_CFG, (1 << 29));
7746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7747 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7748 tw32(GRC_VCPU_EXT_CTRL,
7749 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7752 /* Manage gphy power for all CPMU-absent PCIe devices. */
7753 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7754 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7756 tw32(GRC_MISC_CFG, val);
7758 /* restore 5701 hardware bug workaround write method */
7759 tp->write32 = write_op;
7761 /* Unfortunately, we have to delay before the PCI read back.
7762 * Some 575X chips will not even respond to a PCI cfg access
7763 * when the reset command is given to the chip.
7765 * How do these hardware designers expect things to work
7766 * properly if the PCI write is posted for a long period
7767 * of time? It is always necessary to have some method by
7768 * which a register read back can occur to push the write
7769 * out which does the reset.
7771 * For most tg3 variants the trick below was working.
7776 /* Flush PCI posted writes. The normal MMIO registers
7777 * are inaccessible at this time so this is the only
7778 * way to do this reliably (actually, this is no longer
7779 * the case, see above). I tried to use indirect
7780 * register read/write but this upset some 5701 variants.
7782 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7786 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7789 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7793 /* Wait for link training to complete. */
7794 for (i = 0; i < 5000; i++)
7797 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7798 pci_write_config_dword(tp->pdev, 0xc4,
7799 cfg_val | (1 << 15));
7802 /* Clear the "no snoop" and "relaxed ordering" bits. */
7803 pci_read_config_word(tp->pdev,
7804 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7806 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7807 PCI_EXP_DEVCTL_NOSNOOP_EN);
7809 * Older PCIe devices only support the 128 byte
7810 * MPS setting. Enforce the restriction.
7812 if (!tg3_flag(tp, CPMU_PRESENT))
7813 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7814 pci_write_config_word(tp->pdev,
7815 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7818 /* Clear error status */
7819 pci_write_config_word(tp->pdev,
7820 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7821 PCI_EXP_DEVSTA_CED |
7822 PCI_EXP_DEVSTA_NFED |
7823 PCI_EXP_DEVSTA_FED |
7824 PCI_EXP_DEVSTA_URD);
7827 tg3_restore_pci_state(tp);
7829 tg3_flag_clear(tp, CHIP_RESETTING);
7830 tg3_flag_clear(tp, ERROR_PROCESSED);
7833 if (tg3_flag(tp, 5780_CLASS))
7834 val = tr32(MEMARB_MODE);
7835 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7837 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7839 tw32(0x5000, 0x400);
7842 tw32(GRC_MODE, tp->grc_mode);
7844 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7847 tw32(0xc4, val | (1 << 15));
7850 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7852 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7853 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7854 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7855 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7858 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7859 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7861 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7862 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7867 tw32_f(MAC_MODE, val);
7870 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7872 err = tg3_poll_fw(tp);
7878 if (tg3_flag(tp, PCI_EXPRESS) &&
7879 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7880 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7881 !tg3_flag(tp, 57765_PLUS)) {
7884 tw32(0x7c00, val | (1 << 25));
7887 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7888 val = tr32(TG3_CPMU_CLCK_ORIDE);
7889 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7892 /* Reprobe ASF enable state. */
7893 tg3_flag_clear(tp, ENABLE_ASF);
7894 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7895 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7896 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7899 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7900 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7901 tg3_flag_set(tp, ENABLE_ASF);
7902 tp->last_event_jiffies = jiffies;
7903 if (tg3_flag(tp, 5750_PLUS))
7904 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7911 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7912 struct rtnl_link_stats64 *);
7913 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7914 struct tg3_ethtool_stats *);
7916 /* tp->lock is held. */
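/* Bring the chip down: write the shutdown signature, abort the
 * hardware, reset the chip, and preserve the statistics across the
 * reset.
 */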
7917 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7923 tg3_write_sig_pre_reset(tp, kind);
7925 tg3_abort_hw(tp, silent);
7926 err = tg3_chip_reset(tp);
7928 __tg3_set_mac_addr(tp, 0);
7930 tg3_write_sig_legacy(tp, kind);
7931 tg3_write_sig_post_reset(tp, kind);
7934 /* Save the stats across chip resets... */
7935 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
7936 tg3_get_estats(tp, &tp->estats_prev);
7938 /* And make sure the next sample is new data */
7939 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
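/* Change the device MAC address.  MAC address 1 is left untouched
 * when ASF firmware appears to be using it.
 */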
7948 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7950 struct tg3 *tp = netdev_priv(dev);
7951 struct sockaddr *addr = p;
7952 int err = 0, skip_mac_1 = 0;
7954 if (!is_valid_ether_addr(addr->sa_data))
7957 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7959 if (!netif_running(dev))
7962 if (tg3_flag(tp, ENABLE_ASF)) {
7963 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7965 addr0_high = tr32(MAC_ADDR_0_HIGH);
7966 addr0_low = tr32(MAC_ADDR_0_LOW);
7967 addr1_high = tr32(MAC_ADDR_1_HIGH);
7968 addr1_low = tr32(MAC_ADDR_1_LOW);
7970 /* Skip MAC addr 1 if ASF is using it. */
7971 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7972 !(addr1_high == 0 && addr1_low == 0))
7975 spin_lock_bh(&tp->lock);
7976 __tg3_set_mac_addr(tp, skip_mac_1);
7977 spin_unlock_bh(&tp->lock);
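/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host DMA
 * address of the ring, its length/flags word and, on pre-5705 parts,
 * the NIC-memory address of the descriptors.
 */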
7982 /* tp->lock is held. */
7983 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7984 dma_addr_t mapping, u32 maxlen_flags,
7988 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7989 ((u64) mapping >> 32));
7991 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7992 ((u64) mapping & 0xffffffff));
7994 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7997 if (!tg3_flag(tp, 5705_PLUS))
7999 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8003 static void __tg3_set_rx_mode(struct net_device *);
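/* Program the host coalescing engine from an ethtool_coalesce
 * structure, covering the default vector as well as any additional
 * MSI-X vectors.
 */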
8004 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8008 if (!tg3_flag(tp, ENABLE_TSS)) {
8009 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8010 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8011 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8013 tw32(HOSTCC_TXCOL_TICKS, 0);
8014 tw32(HOSTCC_TXMAX_FRAMES, 0);
8015 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8018 if (!tg3_flag(tp, ENABLE_RSS)) {
8019 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8020 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8021 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8023 tw32(HOSTCC_RXCOL_TICKS, 0);
8024 tw32(HOSTCC_RXMAX_FRAMES, 0);
8025 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8028 if (!tg3_flag(tp, 5705_PLUS)) {
8029 u32 val = ec->stats_block_coalesce_usecs;
8031 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8032 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8034 if (!netif_carrier_ok(tp->dev))
8037 tw32(HOSTCC_STAT_COAL_TICKS, val);
8040 for (i = 0; i < tp->irq_cnt - 1; i++) {
8043 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8044 tw32(reg, ec->rx_coalesce_usecs);
8045 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8046 tw32(reg, ec->rx_max_coalesced_frames);
8047 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8048 tw32(reg, ec->rx_max_coalesced_frames_irq);
8050 if (tg3_flag(tp, ENABLE_TSS)) {
8051 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8052 tw32(reg, ec->tx_coalesce_usecs);
8053 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8054 tw32(reg, ec->tx_max_coalesced_frames);
8055 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8056 tw32(reg, ec->tx_max_coalesced_frames_irq);
8060 for (; i < tp->irq_max - 1; i++) {
8061 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8062 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8063 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8065 if (tg3_flag(tp, ENABLE_TSS)) {
8066 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8067 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8068 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8073 /* tp->lock is held. */
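/* Disable all but the first send and receive return rings, clear the
 * interrupt mailboxes and host status blocks, and reprogram the ring
 * control blocks in NIC SRAM.
 */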
8074 static void tg3_rings_reset(struct tg3 *tp)
8077 u32 stblk, txrcb, rxrcb, limit;
8078 struct tg3_napi *tnapi = &tp->napi[0];
8080 /* Disable all transmit rings but the first. */
8081 if (!tg3_flag(tp, 5705_PLUS))
8082 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8083 else if (tg3_flag(tp, 5717_PLUS))
8084 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8085 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8086 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8088 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8090 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8091 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8092 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093 BDINFO_FLAGS_DISABLED);
8096 /* Disable all receive return rings but the first. */
8097 if (tg3_flag(tp, 5717_PLUS))
8098 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8099 else if (!tg3_flag(tp, 5705_PLUS))
8100 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8101 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8103 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8105 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8107 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8108 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8109 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8110 BDINFO_FLAGS_DISABLED);
8112 /* Disable interrupts */
8113 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8114 tp->napi[0].chk_msi_cnt = 0;
8115 tp->napi[0].last_rx_cons = 0;
8116 tp->napi[0].last_tx_cons = 0;
8118 /* Zero mailbox registers. */
8119 if (tg3_flag(tp, SUPPORT_MSIX)) {
8120 for (i = 1; i < tp->irq_max; i++) {
8121 tp->napi[i].tx_prod = 0;
8122 tp->napi[i].tx_cons = 0;
8123 if (tg3_flag(tp, ENABLE_TSS))
8124 tw32_mailbox(tp->napi[i].prodmbox, 0);
8125 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8126 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8127 tp->napi[i].chk_msi_cnt = 0;
8128 tp->napi[i].last_rx_cons = 0;
8129 tp->napi[i].last_tx_cons = 0;
8131 if (!tg3_flag(tp, ENABLE_TSS))
8132 tw32_mailbox(tp->napi[0].prodmbox, 0);
8134 tp->napi[0].tx_prod = 0;
8135 tp->napi[0].tx_cons = 0;
8136 tw32_mailbox(tp->napi[0].prodmbox, 0);
8137 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8140 /* Make sure the NIC-based send BD rings are disabled. */
8141 if (!tg3_flag(tp, 5705_PLUS)) {
8142 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8143 for (i = 0; i < 16; i++)
8144 tw32_tx_mbox(mbox + i * 8, 0);
8147 txrcb = NIC_SRAM_SEND_RCB;
8148 rxrcb = NIC_SRAM_RCV_RET_RCB;
8150 /* Clear status block in ram. */
8151 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8153 /* Set status block DMA address */
8154 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8155 ((u64) tnapi->status_mapping >> 32));
8156 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8157 ((u64) tnapi->status_mapping & 0xffffffff));
8159 if (tnapi->tx_ring) {
8160 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8161 (TG3_TX_RING_SIZE <<
8162 BDINFO_FLAGS_MAXLEN_SHIFT),
8163 NIC_SRAM_TX_BUFFER_DESC);
8164 txrcb += TG3_BDINFO_SIZE;
8167 if (tnapi->rx_rcb) {
8168 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8169 (tp->rx_ret_ring_mask + 1) <<
8170 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8171 rxrcb += TG3_BDINFO_SIZE;
8174 stblk = HOSTCC_STATBLCK_RING1;
8176 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8177 u64 mapping = (u64)tnapi->status_mapping;
8178 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8179 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8181 /* Clear status block in ram. */
8182 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8184 if (tnapi->tx_ring) {
8185 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8186 (TG3_TX_RING_SIZE <<
8187 BDINFO_FLAGS_MAXLEN_SHIFT),
8188 NIC_SRAM_TX_BUFFER_DESC);
8189 txrcb += TG3_BDINFO_SIZE;
8192 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8193 ((tp->rx_ret_ring_mask + 1) <<
8194 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8197 rxrcb += TG3_BDINFO_SIZE;
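/* Choose the RX buffer descriptor replenish thresholds based on the
 * size of the on-chip BD cache for this chip family.
 */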
8201 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8203 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8205 if (!tg3_flag(tp, 5750_PLUS) ||
8206 tg3_flag(tp, 5780_CLASS) ||
8207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8208 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8209 tg3_flag(tp, 57765_PLUS))
8210 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8211 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8213 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8215 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8217 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8218 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8220 val = min(nic_rep_thresh, host_rep_thresh);
8221 tw32(RCVBDI_STD_THRESH, val);
8223 if (tg3_flag(tp, 57765_PLUS))
8224 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8226 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8229 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8231 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8233 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8234 tw32(RCVBDI_JUMBO_THRESH, val);
8236 if (tg3_flag(tp, 57765_PLUS))
8237 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8240 /* tp->lock is held. */
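/* Reset and fully reprogram the hardware: chip reset, ring setup, DMA
 * engine modes, buffer manager watermarks, MAC/PHY configuration and
 * receive rules.
 */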
8241 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8243 u32 val, rdmac_mode;
8245 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8247 tg3_disable_ints(tp);
8251 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8253 if (tg3_flag(tp, INIT_COMPLETE))
8254 tg3_abort_hw(tp, 1);
8256 /* Enable MAC control of LPI */
8257 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8258 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8259 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8260 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8262 tw32_f(TG3_CPMU_EEE_CTRL,
8263 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8265 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8266 TG3_CPMU_EEEMD_LPI_IN_TX |
8267 TG3_CPMU_EEEMD_LPI_IN_RX |
8268 TG3_CPMU_EEEMD_EEE_ENABLE;
8270 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8271 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8273 if (tg3_flag(tp, ENABLE_APE))
8274 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8276 tw32_f(TG3_CPMU_EEE_MODE, val);
8278 tw32_f(TG3_CPMU_EEE_DBTMR1,
8279 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8280 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8282 tw32_f(TG3_CPMU_EEE_DBTMR2,
8283 TG3_CPMU_DBTMR2_APE_TX_2047US |
8284 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8290 err = tg3_chip_reset(tp);
8294 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8296 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8297 val = tr32(TG3_CPMU_CTRL);
8298 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8299 tw32(TG3_CPMU_CTRL, val);
8301 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8302 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8303 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8304 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8306 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8307 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8308 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8309 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8311 val = tr32(TG3_CPMU_HST_ACC);
8312 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8313 val |= CPMU_HST_ACC_MACCLK_6_25;
8314 tw32(TG3_CPMU_HST_ACC, val);
8317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8318 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8319 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8320 PCIE_PWR_MGMT_L1_THRESH_4MS;
8321 tw32(PCIE_PWR_MGMT_THRESH, val);
8323 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8324 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8326 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8328 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8329 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8332 if (tg3_flag(tp, L1PLLPD_EN)) {
8333 u32 grc_mode = tr32(GRC_MODE);
8335 /* Access the lower 1K of PL PCIE block registers. */
8336 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8339 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8340 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8341 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8343 tw32(GRC_MODE, grc_mode);
8346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8347 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8348 u32 grc_mode = tr32(GRC_MODE);
8350 /* Access the lower 1K of PL PCIE block registers. */
8351 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8354 val = tr32(TG3_PCIE_TLDLPL_PORT +
8355 TG3_PCIE_PL_LO_PHYCTL5);
8356 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8357 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8359 tw32(GRC_MODE, grc_mode);
8362 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8363 u32 grc_mode = tr32(GRC_MODE);
8365 /* Access the lower 1K of DL PCIE block registers. */
8366 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8367 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8369 val = tr32(TG3_PCIE_TLDLPL_PORT +
8370 TG3_PCIE_DL_LO_FTSMAX);
8371 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8372 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8373 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8375 tw32(GRC_MODE, grc_mode);
8378 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8379 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8380 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8381 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8384 /* This works around an issue with Athlon chipsets on
8385 * B3 tigon3 silicon. This bit has no effect on any
8386 * other revision. But do not set this on PCI Express
8387 * chips and don't even touch the clocks if the CPMU is present.
8389 if (!tg3_flag(tp, CPMU_PRESENT)) {
8390 if (!tg3_flag(tp, PCI_EXPRESS))
8391 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8392 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8395 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8396 tg3_flag(tp, PCIX_MODE)) {
8397 val = tr32(TG3PCI_PCISTATE);
8398 val |= PCISTATE_RETRY_SAME_DMA;
8399 tw32(TG3PCI_PCISTATE, val);
8402 if (tg3_flag(tp, ENABLE_APE)) {
8403 /* Allow reads and writes to the
8404 * APE register and memory space.
8406 val = tr32(TG3PCI_PCISTATE);
8407 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8408 PCISTATE_ALLOW_APE_SHMEM_WR |
8409 PCISTATE_ALLOW_APE_PSPACE_WR;
8410 tw32(TG3PCI_PCISTATE, val);
8413 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8414 /* Enable some hw fixes. */
8415 val = tr32(TG3PCI_MSI_DATA);
8416 val |= (1 << 26) | (1 << 28) | (1 << 29);
8417 tw32(TG3PCI_MSI_DATA, val);
8420 /* Descriptor ring init may make accesses to the
8421 * NIC SRAM area to setup the TX descriptors, so we
8422 * can only do this after the hardware has been
8423 * successfully reset.
8425 err = tg3_init_rings(tp);
8429 if (tg3_flag(tp, 57765_PLUS)) {
8430 val = tr32(TG3PCI_DMA_RW_CTRL) &
8431 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8432 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8433 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8434 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8435 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8436 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8437 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8438 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8439 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8440 /* This value is determined during the probe time DMA
8441 * engine test, tg3_test_dma.
8443 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8446 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8447 GRC_MODE_4X_NIC_SEND_RINGS |
8448 GRC_MODE_NO_TX_PHDR_CSUM |
8449 GRC_MODE_NO_RX_PHDR_CSUM);
8450 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8452 /* Pseudo-header checksum is done by hardware logic and not
8453 * the offload processors, so make the chip do the pseudo-
8454 * header checksums on receive. For transmit it is more
8455 * convenient to do the pseudo-header checksum in software
8456 * as Linux does that on transmit for us in all cases.
8458 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8462 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8464 /* Set up the timer prescaler register. Clock is always 66 MHz. */
8465 val = tr32(GRC_MISC_CFG);
8467 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8468 tw32(GRC_MISC_CFG, val);
8470 /* Initialize MBUF/DESC pool. */
8471 if (tg3_flag(tp, 5750_PLUS)) {
8473 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8474 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8476 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8478 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8479 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8480 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8481 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8484 fw_len = tp->fw_len;
8485 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8486 tw32(BUFMGR_MB_POOL_ADDR,
8487 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8488 tw32(BUFMGR_MB_POOL_SIZE,
8489 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8492 if (tp->dev->mtu <= ETH_DATA_LEN) {
8493 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8494 tp->bufmgr_config.mbuf_read_dma_low_water);
8495 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8496 tp->bufmgr_config.mbuf_mac_rx_low_water);
8497 tw32(BUFMGR_MB_HIGH_WATER,
8498 tp->bufmgr_config.mbuf_high_water);
8500 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8501 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8502 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8503 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8504 tw32(BUFMGR_MB_HIGH_WATER,
8505 tp->bufmgr_config.mbuf_high_water_jumbo);
8507 tw32(BUFMGR_DMA_LOW_WATER,
8508 tp->bufmgr_config.dma_low_water);
8509 tw32(BUFMGR_DMA_HIGH_WATER,
8510 tp->bufmgr_config.dma_high_water);
8512 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8514 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8516 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8517 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8518 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8519 tw32(BUFMGR_MODE, val);
8520 for (i = 0; i < 2000; i++) {
8521 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8526 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8530 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8531 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8533 tg3_setup_rxbd_thresholds(tp);
8535 /* Initialize TG3_BDINFO's at:
8536 * RCVDBDI_STD_BD: standard eth size rx ring
8537 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8538 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8541 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8542 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8543 * ring attribute flags
8544 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8546 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8547 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8549 * The size of each ring is fixed in the firmware, but the location is
 * configurable.
 */
8552 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8553 ((u64) tpr->rx_std_mapping >> 32));
8554 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8555 ((u64) tpr->rx_std_mapping & 0xffffffff));
8556 if (!tg3_flag(tp, 5717_PLUS))
8557 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8558 NIC_SRAM_RX_BUFFER_DESC);
8560 /* Disable the mini ring */
8561 if (!tg3_flag(tp, 5705_PLUS))
8562 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8563 BDINFO_FLAGS_DISABLED);
8565 /* Program the jumbo buffer descriptor ring control
8566 * blocks on those devices that have them.
8568 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8569 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8571 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8572 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8573 ((u64) tpr->rx_jmb_mapping >> 32));
8574 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8575 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8576 val = TG3_RX_JMB_RING_SIZE(tp) <<
8577 BDINFO_FLAGS_MAXLEN_SHIFT;
8578 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8579 val | BDINFO_FLAGS_USE_EXT_RECV);
8580 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8582 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8583 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8585 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8586 BDINFO_FLAGS_DISABLED);
8589 if (tg3_flag(tp, 57765_PLUS)) {
8590 val = TG3_RX_STD_RING_SIZE(tp);
8591 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8592 val |= (TG3_RX_STD_DMA_SZ << 2);
8594 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8596 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8598 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8600 tpr->rx_std_prod_idx = tp->rx_pending;
8601 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8603 tpr->rx_jmb_prod_idx =
8604 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8605 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8607 tg3_rings_reset(tp);
8609 /* Initialize MAC address and backoff seed. */
8610 __tg3_set_mac_addr(tp, 0);
8612 /* MTU + ethernet header + FCS + optional VLAN tag */
8613 tw32(MAC_RX_MTU_SIZE,
8614 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8616 /* The slot time is changed by tg3_setup_phy if we
8617 * run at gigabit with half duplex.
8619 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8620 (6 << TX_LENGTHS_IPG_SHIFT) |
8621 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8624 val |= tr32(MAC_TX_LENGTHS) &
8625 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8626 TX_LENGTHS_CNT_DWN_VAL_MSK);
8628 tw32(MAC_TX_LENGTHS, val);
8630 /* Receive rules. */
8631 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8632 tw32(RCVLPC_CONFIG, 0x0181);
8634 /* Calculate the RDMAC_MODE setting early; we need it to determine
8635 * the RCVLPC_STATE_ENABLE mask.
8637 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8638 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8639 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8640 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8641 RDMAC_MODE_LNGREAD_ENAB);
8643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8644 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8649 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8650 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8651 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8654 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8655 if (tg3_flag(tp, TSO_CAPABLE) &&
8656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8657 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8658 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8659 !tg3_flag(tp, IS_5788)) {
8660 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8664 if (tg3_flag(tp, PCI_EXPRESS))
8665 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8667 if (tg3_flag(tp, HW_TSO_1) ||
8668 tg3_flag(tp, HW_TSO_2) ||
8669 tg3_flag(tp, HW_TSO_3))
8670 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8672 if (tg3_flag(tp, 57765_PLUS) ||
8673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8675 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8678 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8684 tg3_flag(tp, 57765_PLUS)) {
8685 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8688 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8689 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8690 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8691 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8692 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8693 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8695 tw32(TG3_RDMA_RSRVCTRL_REG,
8696 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8699 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8701 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8702 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8703 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8704 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8707 /* Receive/send statistics. */
8708 if (tg3_flag(tp, 5750_PLUS)) {
8709 val = tr32(RCVLPC_STATS_ENABLE);
8710 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8711 tw32(RCVLPC_STATS_ENABLE, val);
8712 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8713 tg3_flag(tp, TSO_CAPABLE)) {
8714 val = tr32(RCVLPC_STATS_ENABLE);
8715 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8716 tw32(RCVLPC_STATS_ENABLE, val);
8718 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8720 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8721 tw32(SNDDATAI_STATSENAB, 0xffffff);
8722 tw32(SNDDATAI_STATSCTRL,
8723 (SNDDATAI_SCTRL_ENABLE |
8724 SNDDATAI_SCTRL_FASTUPD));
8726 /* Setup host coalescing engine. */
8727 tw32(HOSTCC_MODE, 0);
8728 for (i = 0; i < 2000; i++) {
8729 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8734 __tg3_set_coalesce(tp, &tp->coal);
8736 if (!tg3_flag(tp, 5705_PLUS)) {
8737 /* Status/statistics block address. See tg3_timer,
8738 * the tg3_periodic_fetch_stats call there, and
8739 * tg3_get_stats to see how this works for 5705/5750 chips.
8741 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8742 ((u64) tp->stats_mapping >> 32));
8743 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8744 ((u64) tp->stats_mapping & 0xffffffff));
8745 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8747 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8749 /* Clear statistics and status block memory areas */
8750 for (i = NIC_SRAM_STATS_BLK;
8751 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8753 tg3_write_mem(tp, i, 0);
8758 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8760 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8761 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8762 if (!tg3_flag(tp, 5705_PLUS))
8763 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8765 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8766 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8767 /* reset to prevent losing 1st rx packet intermittently */
8768 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8772 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8773 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8774 MAC_MODE_FHDE_ENABLE;
8775 if (tg3_flag(tp, ENABLE_APE))
8776 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8777 if (!tg3_flag(tp, 5705_PLUS) &&
8778 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8779 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8780 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8781 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8784 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8785 * If TG3_FLAG_IS_NIC is zero, we should read the
8786 * register to preserve the GPIO settings for LOMs. The GPIOs,
8787 * whether used as inputs or outputs, are set by boot code after
 * reset.
 */
8790 if (!tg3_flag(tp, IS_NIC)) {
8793 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8794 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8795 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8797 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8798 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8799 GRC_LCLCTRL_GPIO_OUTPUT3;
8801 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8802 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8804 tp->grc_local_ctrl &= ~gpio_mask;
8805 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8807 /* GPIO1 must be driven high for eeprom write protect */
8808 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8809 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8810 GRC_LCLCTRL_GPIO_OUTPUT1);
8812 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8815 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8816 val = tr32(MSGINT_MODE);
8817 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8818 if (!tg3_flag(tp, 1SHOT_MSI))
8819 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8820 tw32(MSGINT_MODE, val);
8823 if (!tg3_flag(tp, 5705_PLUS)) {
8824 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8828 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8829 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8830 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8831 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8832 WDMAC_MODE_LNGREAD_ENAB);
8834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8835 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8836 if (tg3_flag(tp, TSO_CAPABLE) &&
8837 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8838 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8840 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8841 !tg3_flag(tp, IS_5788)) {
8842 val |= WDMAC_MODE_RX_ACCEL;
8846 /* Enable host coalescing bug fix */
8847 if (tg3_flag(tp, 5755_PLUS))
8848 val |= WDMAC_MODE_STATUS_TAG_FIX;
8850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8851 val |= WDMAC_MODE_BURST_ALL_DATA;
8853 tw32_f(WDMAC_MODE, val);
8856 if (tg3_flag(tp, PCIX_MODE)) {
8859 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8862 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8863 pcix_cmd |= PCI_X_CMD_READ_2K;
8864 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8865 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8866 pcix_cmd |= PCI_X_CMD_READ_2K;
8868 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8872 tw32_f(RDMAC_MODE, rdmac_mode);
8875 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8876 if (!tg3_flag(tp, 5705_PLUS))
8877 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8881 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8883 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8885 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8886 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8887 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8888 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8889 val |= RCVDBDI_MODE_LRG_RING_SZ;
8890 tw32(RCVDBDI_MODE, val);
8891 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8892 if (tg3_flag(tp, HW_TSO_1) ||
8893 tg3_flag(tp, HW_TSO_2) ||
8894 tg3_flag(tp, HW_TSO_3))
8895 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8896 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8897 if (tg3_flag(tp, ENABLE_TSS))
8898 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8899 tw32(SNDBDI_MODE, val);
8900 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8902 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8903 err = tg3_load_5701_a0_firmware_fix(tp);
8908 if (tg3_flag(tp, TSO_CAPABLE)) {
8909 err = tg3_load_tso_firmware(tp);
8914 tp->tx_mode = TX_MODE_ENABLE;
8916 if (tg3_flag(tp, 5755_PLUS) ||
8917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8918 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8921 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8922 tp->tx_mode &= ~val;
8923 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8926 tw32_f(MAC_TX_MODE, tp->tx_mode);
8929 if (tg3_flag(tp, ENABLE_RSS)) {
8931 u32 reg = MAC_RSS_INDIR_TBL_0;
8933 if (tp->irq_cnt == 2) {
8934 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8941 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8942 val = i % (tp->irq_cnt - 1);
8944 for (; i % 8; i++) {
8946 val |= (i % (tp->irq_cnt - 1));
8953 /* Setup the "secret" hash key. */
8954 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8955 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8956 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8957 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8958 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8959 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8960 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8961 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8962 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8963 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8966 tp->rx_mode = RX_MODE_ENABLE;
8967 if (tg3_flag(tp, 5755_PLUS))
8968 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8970 if (tg3_flag(tp, ENABLE_RSS))
8971 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8972 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8973 RX_MODE_RSS_IPV6_HASH_EN |
8974 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8975 RX_MODE_RSS_IPV4_HASH_EN |
8976 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8978 tw32_f(MAC_RX_MODE, tp->rx_mode);
8981 tw32(MAC_LED_CTRL, tp->led_ctrl);
8983 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8984 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8985 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8988 tw32_f(MAC_RX_MODE, tp->rx_mode);
8991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8992 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8993 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8994 /* Set drive transmission level to 1.2V */
8995 /* only if the signal pre-emphasis bit is not set */
8996 val = tr32(MAC_SERDES_CFG);
8999 tw32(MAC_SERDES_CFG, val);
9001 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9002 tw32(MAC_SERDES_CFG, 0x616000);
9005 /* Prevent chip from dropping frames when flow control
 * is enabled.
 */
9008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9012 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9015 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9016 /* Use hardware link auto-negotiation */
9017 tg3_flag_set(tp, HW_AUTONEG);
9020 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9021 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9024 tmp = tr32(SERDES_RX_CTRL);
9025 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9026 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9027 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9028 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9031 if (!tg3_flag(tp, USE_PHYLIB)) {
9032 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9033 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9034 tp->link_config.speed = tp->link_config.orig_speed;
9035 tp->link_config.duplex = tp->link_config.orig_duplex;
9036 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9039 err = tg3_setup_phy(tp, 0);
9043 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9047 /* Clear CRC stats. */
9048 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9049 tg3_writephy(tp, MII_TG3_TEST1,
9050 tmp | MII_TG3_TEST1_CRC_EN);
9051 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9056 __tg3_set_rx_mode(tp->dev);
9058 /* Initialize receive rules. */
9059 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9060 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9061 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9062 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9064 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9068 if (tg3_flag(tp, ENABLE_ASF))
9072 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9074 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9076 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9078 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9080 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9082 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9084 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9086 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9088 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9090 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9092 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9094 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9096 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9098 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9106 if (tg3_flag(tp, ENABLE_APE))
9107 /* Write our heartbeat update interval to APE. */
9108 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9109 APE_HOST_HEARTBEAT_INT_DISABLE);
9111 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9116 /* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
9119 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9121 tg3_switch_clocks(tp);
9123 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9125 return tg3_reset_hw(tp, reset_phy);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
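/* Illustrative sketch, not part of the original driver: the macro above
 * folds a 32-bit hardware counter reading into a 64-bit software counter
 * kept as a low/high word pair, carrying into the high word whenever the
 * low word wraps.  The helper below is an equivalent open-coded form; its
 * name is made up for illustration only.
 */
static inline void tg3_stat64_add32(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)	/* low word wrapped around */
		pstat->high += 1;
}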
9135 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9137 struct tg3_hw_stats *sp = tp->hw_stats;
9139 if (!netif_carrier_ok(tp->dev))
9142 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9143 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9144 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9145 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9146 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9147 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9148 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9149 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9150 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9151 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9152 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9153 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9154 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9156 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9157 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9158 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9159 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9160 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9161 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9162 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9163 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9164 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9165 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9166 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9167 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9168 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9169 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9171 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9172 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9173 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9174 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9175 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9177 u32 val = tr32(HOSTCC_FLOW_ATTN);
9178 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9180 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9181 sp->rx_discards.low += val;
9182 if (sp->rx_discards.low < val)
9183 sp->rx_discards.high += 1;
9185 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9187 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9190 static void tg3_chk_missed_msi(struct tg3 *tp)
9194 for (i = 0; i < tp->irq_cnt; i++) {
9195 struct tg3_napi *tnapi = &tp->napi[i];
9197 if (tg3_has_work(tnapi)) {
9198 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9199 tnapi->last_tx_cons == tnapi->tx_cons) {
9200 if (tnapi->chk_msi_cnt < 1) {
9201 tnapi->chk_msi_cnt++;
9207 tnapi->chk_msi_cnt = 0;
9208 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9209 tnapi->last_tx_cons = tnapi->tx_cons;
9213 static void tg3_timer(unsigned long __opaque)
9215 struct tg3 *tp = (struct tg3 *) __opaque;
9217 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9220 spin_lock(&tp->lock);
9222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9224 tg3_chk_missed_msi(tp);
9226 if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage exists because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
9231 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9232 tw32(GRC_LOCAL_CTRL,
9233 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9235 tw32(HOSTCC_MODE, tp->coalesce_mode |
9236 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9239 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9240 spin_unlock(&tp->lock);
9241 tg3_reset_task_schedule(tp);
9246 /* This part only runs once per second. */
9247 if (!--tp->timer_counter) {
9248 if (tg3_flag(tp, 5705_PLUS))
9249 tg3_periodic_fetch_stats(tp);
9251 if (tp->setlpicnt && !--tp->setlpicnt)
9252 tg3_phy_eee_enable(tp);
9254 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9258 mac_stat = tr32(MAC_STATUS);
9261 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9262 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9264 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9268 tg3_setup_phy(tp, 0);
9269 } else if (tg3_flag(tp, POLL_SERDES)) {
9270 u32 mac_stat = tr32(MAC_STATUS);
9273 if (netif_carrier_ok(tp->dev) &&
9274 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9277 if (!netif_carrier_ok(tp->dev) &&
9278 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9279 MAC_STATUS_SIGNAL_DET))) {
9283 if (!tp->serdes_counter) {
9286 ~MAC_MODE_PORT_MODE_MASK));
9288 tw32_f(MAC_MODE, tp->mac_mode);
9291 tg3_setup_phy(tp, 0);
9293 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9294 tg3_flag(tp, 5780_CLASS)) {
9295 tg3_serdes_parallel_detect(tp);
9298 tp->timer_counter = tp->timer_multiplier;
9301 /* Heartbeat is only sent once every 2 seconds.
9303 * The heartbeat is to tell the ASF firmware that the host
9304 * driver is still alive. In the event that the OS crashes,
9305 * ASF needs to reset the hardware to free up the FIFO space
9306 * that may be filled with rx packets destined for the host.
9307 * If the FIFO is full, ASF will no longer function properly.
9309 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
9318 if (!--tp->asf_counter) {
9319 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9320 tg3_wait_for_event_ack(tp);
9322 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9323 FWCMD_NICDRV_ALIVE3);
9324 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9325 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9326 TG3_FW_UPDATE_TIMEOUT_SEC);
9328 tg3_generate_fw_event(tp);
9330 tp->asf_counter = tp->asf_multiplier;
9333 spin_unlock(&tp->lock);
9336 tp->timer.expires = jiffies + tp->timer_offset;
9337 add_timer(&tp->timer);
9340 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9343 unsigned long flags;
9345 struct tg3_napi *tnapi = &tp->napi[irq_num];
9347 if (tp->irq_cnt == 1)
9348 name = tp->dev->name;
9350 name = &tnapi->irq_lbl[0];
9351 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9352 name[IFNAMSIZ-1] = 0;
9355 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9357 if (tg3_flag(tp, 1SHOT_MSI))
9362 if (tg3_flag(tp, TAGGED_STATUS))
9363 fn = tg3_interrupt_tagged;
9364 flags = IRQF_SHARED;
9367 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9370 static int tg3_test_interrupt(struct tg3 *tp)
9372 struct tg3_napi *tnapi = &tp->napi[0];
9373 struct net_device *dev = tp->dev;
9374 int err, i, intr_ok = 0;
9377 if (!netif_running(dev))
9380 tg3_disable_ints(tp);
9382 free_irq(tnapi->irq_vec, tnapi);
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
9388 if (tg3_flag(tp, 57765_PLUS)) {
9389 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9390 tw32(MSGINT_MODE, val);
9393 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9394 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9398 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9399 tg3_enable_ints(tp);
9401 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9404 for (i = 0; i < 5; i++) {
9405 u32 int_mbox, misc_host_ctrl;
9407 int_mbox = tr32_mailbox(tnapi->int_mbox);
9408 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9410 if ((int_mbox != 0) ||
9411 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9416 if (tg3_flag(tp, 57765_PLUS) &&
9417 tnapi->hw_status->status_tag != tnapi->last_tag)
9418 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9423 tg3_disable_ints(tp);
9425 free_irq(tnapi->irq_vec, tnapi);
9427 err = tg3_request_irq(tp, 0);
9433 /* Reenable MSI one shot mode. */
9434 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9435 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9436 tw32(MSGINT_MODE, val);
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 * INTx mode is successfully restored.
 */
9447 static int tg3_test_msi(struct tg3 *tp)
9452 if (!tg3_flag(tp, USING_MSI))
9455 /* Turn off SERR reporting in case MSI terminates with Master
9458 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9459 pci_write_config_word(tp->pdev, PCI_COMMAND,
9460 pci_cmd & ~PCI_COMMAND_SERR);
9462 err = tg3_test_interrupt(tp);
9464 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9469 /* other failures */
9473 /* MSI test failed, go back to INTx mode */
9474 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9475 "to INTx mode. Please report this failure to the PCI "
9476 "maintainer and include system chipset information\n");
9478 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9480 pci_disable_msi(tp->pdev);
9482 tg3_flag_clear(tp, USING_MSI);
9483 tp->napi[0].irq_vec = tp->pdev->irq;
9485 err = tg3_request_irq(tp, 0);
9489 /* Need to reset the chip because the MSI cycle may have terminated
9490 * with Master Abort.
9492 tg3_full_lock(tp, 1);
9494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9495 err = tg3_init_hw(tp, 1);
9497 tg3_full_unlock(tp);
9500 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9505 static int tg3_request_firmware(struct tg3 *tp)
9507 const __be32 *fw_data;
9509 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9510 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9515 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
9522 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9523 if (tp->fw_len < (tp->fw->size - 12)) {
9524 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9525 tp->fw_len, tp->fw_needed);
9526 release_firmware(tp->fw);
9531 /* We no longer need firmware; we have it. */
9532 tp->fw_needed = NULL;
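/* Illustrative layout sketch, not part of the original driver: the
 * firmware image validated above begins with three big-endian words
 * followed by the payload.  The field names below are made up for
 * clarity; only fw_data[2] (the full length including BSS) is consumed
 * by the code above.
 */
struct tg3_fw_img_layout {
	__be32 version;		/* fw_data[0]: firmware version */
	__be32 base_addr;	/* fw_data[1]: load/start address */
	__be32 full_len;	/* fw_data[2]: length including BSS */
	u8 payload[];		/* remaining tp->fw->size - 12 bytes */
};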
9536 static bool tg3_enable_msix(struct tg3 *tp)
9538 int i, rc, cpus = num_online_cpus();
9539 struct msix_entry msix_ent[tp->irq_max];
	if (cpus == 1)
		/* Just fall back to the simpler MSI mode. */
		return false;
9546 * We want as many rx rings enabled as there are cpus.
9547 * The first MSIX vector only deals with link interrupts, etc,
9548 * so we add one to the number of vectors we are requesting.
9550 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
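	/* Worked example (illustrative, not from the original source): on a
	 * machine with four online CPUs and an irq_max of five, this asks
	 * for min(4 + 1, 5) = 5 vectors -- one for link and other events
	 * plus one per rx queue.
	 */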
9552 for (i = 0; i < tp->irq_max; i++) {
9553 msix_ent[i].entry = i;
9554 msix_ent[i].vector = 0;
9557 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9560 } else if (rc != 0) {
9561 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9563 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9568 for (i = 0; i < tp->irq_max; i++)
9569 tp->napi[i].irq_vec = msix_ent[i].vector;
9571 netif_set_real_num_tx_queues(tp->dev, 1);
9572 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9573 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9574 pci_disable_msix(tp->pdev);
9578 if (tp->irq_cnt > 1) {
9579 tg3_flag_set(tp, ENABLE_RSS);
9581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9583 tg3_flag_set(tp, ENABLE_TSS);
9584 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9591 static void tg3_ints_init(struct tg3 *tp)
9593 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9594 !tg3_flag(tp, TAGGED_STATUS)) {
9595 /* All MSI supporting chips should support tagged
9596 * status. Assert that this is the case.
9598 netdev_warn(tp->dev,
9599 "MSI without TAGGED_STATUS? Not using MSI\n");
9603 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9604 tg3_flag_set(tp, USING_MSIX);
9605 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9606 tg3_flag_set(tp, USING_MSI);
9608 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9609 u32 msi_mode = tr32(MSGINT_MODE);
9610 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9611 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9612 if (!tg3_flag(tp, 1SHOT_MSI))
9613 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9614 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9617 if (!tg3_flag(tp, USING_MSIX)) {
9619 tp->napi[0].irq_vec = tp->pdev->irq;
9620 netif_set_real_num_tx_queues(tp->dev, 1);
9621 netif_set_real_num_rx_queues(tp->dev, 1);
9625 static void tg3_ints_fini(struct tg3 *tp)
9627 if (tg3_flag(tp, USING_MSIX))
9628 pci_disable_msix(tp->pdev);
9629 else if (tg3_flag(tp, USING_MSI))
9630 pci_disable_msi(tp->pdev);
9631 tg3_flag_clear(tp, USING_MSI);
9632 tg3_flag_clear(tp, USING_MSIX);
9633 tg3_flag_clear(tp, ENABLE_RSS);
9634 tg3_flag_clear(tp, ENABLE_TSS);
9637 static int tg3_open(struct net_device *dev)
9639 struct tg3 *tp = netdev_priv(dev);
9642 if (tp->fw_needed) {
9643 err = tg3_request_firmware(tp);
9644 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9648 netdev_warn(tp->dev, "TSO capability disabled\n");
9649 tg3_flag_clear(tp, TSO_CAPABLE);
9650 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9651 netdev_notice(tp->dev, "TSO capability restored\n");
9652 tg3_flag_set(tp, TSO_CAPABLE);
9656 netif_carrier_off(tp->dev);
9658 err = tg3_power_up(tp);
9662 tg3_full_lock(tp, 0);
9664 tg3_disable_ints(tp);
9665 tg3_flag_clear(tp, INIT_COMPLETE);
9667 tg3_full_unlock(tp);
9670 * Setup interrupts first so we know how
9671 * many NAPI resources to allocate
9675 /* The placement of this call is tied
9676 * to the setup and use of Host TX descriptors.
9678 err = tg3_alloc_consistent(tp);
9684 tg3_napi_enable(tp);
9686 for (i = 0; i < tp->irq_cnt; i++) {
9687 struct tg3_napi *tnapi = &tp->napi[i];
9688 err = tg3_request_irq(tp, i);
9690 for (i--; i >= 0; i--) {
9691 tnapi = &tp->napi[i];
9692 free_irq(tnapi->irq_vec, tnapi);
9698 tg3_full_lock(tp, 0);
9700 err = tg3_init_hw(tp, 1);
9702 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9705 if (tg3_flag(tp, TAGGED_STATUS) &&
9706 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9707 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9708 tp->timer_offset = HZ;
9710 tp->timer_offset = HZ / 10;
9712 BUG_ON(tp->timer_offset > HZ);
9713 tp->timer_counter = tp->timer_multiplier =
9714 (HZ / tp->timer_offset);
9715 tp->asf_counter = tp->asf_multiplier =
9716 ((HZ / tp->timer_offset) * 2);
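	/* Worked example (illustrative, not from the original source): with
	 * tagged status and timer_offset = HZ, timer_counter counts down
	 * from 1, so the once-per-second block in tg3_timer() runs on every
	 * timer tick; with timer_offset = HZ / 10 it counts down from 10.
	 * asf_counter is always twice that, which gives the two second
	 * heartbeat interval described in tg3_timer().
	 */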
9718 init_timer(&tp->timer);
9719 tp->timer.expires = jiffies + tp->timer_offset;
9720 tp->timer.data = (unsigned long) tp;
9721 tp->timer.function = tg3_timer;
9724 tg3_full_unlock(tp);
9729 if (tg3_flag(tp, USING_MSI)) {
9730 err = tg3_test_msi(tp);
9733 tg3_full_lock(tp, 0);
9734 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9736 tg3_full_unlock(tp);
9741 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9742 u32 val = tr32(PCIE_TRANSACTION_CFG);
9744 tw32(PCIE_TRANSACTION_CFG,
9745 val | PCIE_TRANS_CFG_1SHOT_MSI);
9751 tg3_full_lock(tp, 0);
9753 add_timer(&tp->timer);
9754 tg3_flag_set(tp, INIT_COMPLETE);
9755 tg3_enable_ints(tp);
9757 tg3_full_unlock(tp);
9759 netif_tx_start_all_queues(dev);
	/* Reset the loopback feature if it was turned on while the device
	 * was down; make sure that it is set up properly now.
	 */
9765 if (dev->features & NETIF_F_LOOPBACK)
9766 tg3_set_loopback(dev, dev->features);
9771 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9772 struct tg3_napi *tnapi = &tp->napi[i];
9773 free_irq(tnapi->irq_vec, tnapi);
9777 tg3_napi_disable(tp);
9779 tg3_free_consistent(tp);
9783 tg3_frob_aux_power(tp, false);
9784 pci_set_power_state(tp->pdev, PCI_D3hot);
9788 static int tg3_close(struct net_device *dev)
9791 struct tg3 *tp = netdev_priv(dev);
9793 tg3_napi_disable(tp);
9794 tg3_reset_task_cancel(tp);
9796 netif_tx_stop_all_queues(dev);
9798 del_timer_sync(&tp->timer);
9802 tg3_full_lock(tp, 1);
9804 tg3_disable_ints(tp);
9806 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9808 tg3_flag_clear(tp, INIT_COMPLETE);
9810 tg3_full_unlock(tp);
9812 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9813 struct tg3_napi *tnapi = &tp->napi[i];
9814 free_irq(tnapi->irq_vec, tnapi);
9819 /* Clear stats across close / open calls */
9820 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9821 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9825 tg3_free_consistent(tp);
9829 netif_carrier_off(tp->dev);
9834 static inline u64 get_stat64(tg3_stat64_t *val)
9836 return ((u64)val->high << 32) | ((u64)val->low);
9839 static u64 calc_crc_errors(struct tg3 *tp)
9841 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9843 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9844 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9848 spin_lock_bh(&tp->lock);
9849 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9850 tg3_writephy(tp, MII_TG3_TEST1,
9851 val | MII_TG3_TEST1_CRC_EN);
9852 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9855 spin_unlock_bh(&tp->lock);
9857 tp->phy_crc_errors += val;
9859 return tp->phy_crc_errors;
9862 return get_stat64(&hw_stats->rx_fcs_errors);
9865 #define ESTAT_ADD(member) \
9866 estats->member = old_estats->member + \
9867 get_stat64(&hw_stats->member)
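/* For reference (illustrative, not from the original source), a use such
 * as ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. the previously accumulated snapshot plus the live hardware
 * counter, so the ethtool statistics keep growing across chip resets.
 */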
9869 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
9870 struct tg3_ethtool_stats *estats)
9872 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9873 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9878 ESTAT_ADD(rx_octets);
9879 ESTAT_ADD(rx_fragments);
9880 ESTAT_ADD(rx_ucast_packets);
9881 ESTAT_ADD(rx_mcast_packets);
9882 ESTAT_ADD(rx_bcast_packets);
9883 ESTAT_ADD(rx_fcs_errors);
9884 ESTAT_ADD(rx_align_errors);
9885 ESTAT_ADD(rx_xon_pause_rcvd);
9886 ESTAT_ADD(rx_xoff_pause_rcvd);
9887 ESTAT_ADD(rx_mac_ctrl_rcvd);
9888 ESTAT_ADD(rx_xoff_entered);
9889 ESTAT_ADD(rx_frame_too_long_errors);
9890 ESTAT_ADD(rx_jabbers);
9891 ESTAT_ADD(rx_undersize_packets);
9892 ESTAT_ADD(rx_in_length_errors);
9893 ESTAT_ADD(rx_out_length_errors);
9894 ESTAT_ADD(rx_64_or_less_octet_packets);
9895 ESTAT_ADD(rx_65_to_127_octet_packets);
9896 ESTAT_ADD(rx_128_to_255_octet_packets);
9897 ESTAT_ADD(rx_256_to_511_octet_packets);
9898 ESTAT_ADD(rx_512_to_1023_octet_packets);
9899 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9900 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9901 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9902 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9903 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9905 ESTAT_ADD(tx_octets);
9906 ESTAT_ADD(tx_collisions);
9907 ESTAT_ADD(tx_xon_sent);
9908 ESTAT_ADD(tx_xoff_sent);
9909 ESTAT_ADD(tx_flow_control);
9910 ESTAT_ADD(tx_mac_errors);
9911 ESTAT_ADD(tx_single_collisions);
9912 ESTAT_ADD(tx_mult_collisions);
9913 ESTAT_ADD(tx_deferred);
9914 ESTAT_ADD(tx_excessive_collisions);
9915 ESTAT_ADD(tx_late_collisions);
9916 ESTAT_ADD(tx_collide_2times);
9917 ESTAT_ADD(tx_collide_3times);
9918 ESTAT_ADD(tx_collide_4times);
9919 ESTAT_ADD(tx_collide_5times);
9920 ESTAT_ADD(tx_collide_6times);
9921 ESTAT_ADD(tx_collide_7times);
9922 ESTAT_ADD(tx_collide_8times);
9923 ESTAT_ADD(tx_collide_9times);
9924 ESTAT_ADD(tx_collide_10times);
9925 ESTAT_ADD(tx_collide_11times);
9926 ESTAT_ADD(tx_collide_12times);
9927 ESTAT_ADD(tx_collide_13times);
9928 ESTAT_ADD(tx_collide_14times);
9929 ESTAT_ADD(tx_collide_15times);
9930 ESTAT_ADD(tx_ucast_packets);
9931 ESTAT_ADD(tx_mcast_packets);
9932 ESTAT_ADD(tx_bcast_packets);
9933 ESTAT_ADD(tx_carrier_sense_errors);
9934 ESTAT_ADD(tx_discards);
9935 ESTAT_ADD(tx_errors);
9937 ESTAT_ADD(dma_writeq_full);
9938 ESTAT_ADD(dma_write_prioq_full);
9939 ESTAT_ADD(rxbds_empty);
9940 ESTAT_ADD(rx_discards);
9941 ESTAT_ADD(rx_errors);
9942 ESTAT_ADD(rx_threshold_hit);
9944 ESTAT_ADD(dma_readq_full);
9945 ESTAT_ADD(dma_read_prioq_full);
9946 ESTAT_ADD(tx_comp_queue_full);
9948 ESTAT_ADD(ring_set_send_prod_index);
9949 ESTAT_ADD(ring_status_update);
9950 ESTAT_ADD(nic_irqs);
9951 ESTAT_ADD(nic_avoided_irqs);
9952 ESTAT_ADD(nic_tx_threshold_hit);
9954 ESTAT_ADD(mbuf_lwm_thresh_hit);
9959 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9960 struct rtnl_link_stats64 *stats)
9962 struct tg3 *tp = netdev_priv(dev);
9963 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9964 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9969 stats->rx_packets = old_stats->rx_packets +
9970 get_stat64(&hw_stats->rx_ucast_packets) +
9971 get_stat64(&hw_stats->rx_mcast_packets) +
9972 get_stat64(&hw_stats->rx_bcast_packets);
9974 stats->tx_packets = old_stats->tx_packets +
9975 get_stat64(&hw_stats->tx_ucast_packets) +
9976 get_stat64(&hw_stats->tx_mcast_packets) +
9977 get_stat64(&hw_stats->tx_bcast_packets);
9979 stats->rx_bytes = old_stats->rx_bytes +
9980 get_stat64(&hw_stats->rx_octets);
9981 stats->tx_bytes = old_stats->tx_bytes +
9982 get_stat64(&hw_stats->tx_octets);
9984 stats->rx_errors = old_stats->rx_errors +
9985 get_stat64(&hw_stats->rx_errors);
9986 stats->tx_errors = old_stats->tx_errors +
9987 get_stat64(&hw_stats->tx_errors) +
9988 get_stat64(&hw_stats->tx_mac_errors) +
9989 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9990 get_stat64(&hw_stats->tx_discards);
9992 stats->multicast = old_stats->multicast +
9993 get_stat64(&hw_stats->rx_mcast_packets);
9994 stats->collisions = old_stats->collisions +
9995 get_stat64(&hw_stats->tx_collisions);
9997 stats->rx_length_errors = old_stats->rx_length_errors +
9998 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9999 get_stat64(&hw_stats->rx_undersize_packets);
10001 stats->rx_over_errors = old_stats->rx_over_errors +
10002 get_stat64(&hw_stats->rxbds_empty);
10003 stats->rx_frame_errors = old_stats->rx_frame_errors +
10004 get_stat64(&hw_stats->rx_align_errors);
10005 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10006 get_stat64(&hw_stats->tx_discards);
10007 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10008 get_stat64(&hw_stats->tx_carrier_sense_errors);
10010 stats->rx_crc_errors = old_stats->rx_crc_errors +
10011 calc_crc_errors(tp);
10013 stats->rx_missed_errors = old_stats->rx_missed_errors +
10014 get_stat64(&hw_stats->rx_discards);
10016 stats->rx_dropped = tp->rx_dropped;
10017 stats->tx_dropped = tp->tx_dropped;
10022 static inline u32 calc_crc(unsigned char *buf, int len)
10030 for (j = 0; j < len; j++) {
10033 for (k = 0; k < 8; k++) {
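/* For reference (illustrative sketch, not the driver's implementation
 * verbatim): the multicast hash filtering below expects the standard
 * bit-wise, reflected CRC-32 with polynomial 0xedb88320.  A standalone
 * version, with a made-up name, looks like this:
 */
static u32 tg3_example_ether_crc32(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}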
10046 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10048 /* accept or reject all multicast frames */
10049 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10050 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10051 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10052 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10055 static void __tg3_set_rx_mode(struct net_device *dev)
10057 struct tg3 *tp = netdev_priv(dev);
10060 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10061 RX_MODE_KEEP_VLAN_TAG);
10063 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
10067 if (!tg3_flag(tp, ENABLE_ASF))
10068 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10071 if (dev->flags & IFF_PROMISC) {
10072 /* Promiscuous mode. */
10073 rx_mode |= RX_MODE_PROMISC;
10074 } else if (dev->flags & IFF_ALLMULTI) {
10075 /* Accept all multicast. */
10076 tg3_set_multi(tp, 1);
10077 } else if (netdev_mc_empty(dev)) {
10078 /* Reject all multicast. */
10079 tg3_set_multi(tp, 0);
10081 /* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
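		/* Worked example (illustrative, not from the original source):
		 * crc = 0xffffff5a gives ~crc & 0x7f = 0x25, so regidx =
		 * (0x25 & 0x60) >> 5 = 1 and the low five bits select bit 5,
		 * i.e. bit 5 of MAC_HASH_REG_1 is set for that address.
		 */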
10096 tw32(MAC_HASH_REG_0, mc_filter[0]);
10097 tw32(MAC_HASH_REG_1, mc_filter[1]);
10098 tw32(MAC_HASH_REG_2, mc_filter[2]);
10099 tw32(MAC_HASH_REG_3, mc_filter[3]);
10102 if (rx_mode != tp->rx_mode) {
10103 tp->rx_mode = rx_mode;
10104 tw32_f(MAC_RX_MODE, rx_mode);
10109 static void tg3_set_rx_mode(struct net_device *dev)
10111 struct tg3 *tp = netdev_priv(dev);
10113 if (!netif_running(dev))
10116 tg3_full_lock(tp, 0);
10117 __tg3_set_rx_mode(dev);
10118 tg3_full_unlock(tp);
10121 static int tg3_get_regs_len(struct net_device *dev)
10123 return TG3_REG_BLK_SIZE;
10126 static void tg3_get_regs(struct net_device *dev,
10127 struct ethtool_regs *regs, void *_p)
10129 struct tg3 *tp = netdev_priv(dev);
10133 memset(_p, 0, TG3_REG_BLK_SIZE);
10135 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10138 tg3_full_lock(tp, 0);
10140 tg3_dump_legacy_regs(tp, (u32 *)_p);
10142 tg3_full_unlock(tp);
10145 static int tg3_get_eeprom_len(struct net_device *dev)
10147 struct tg3 *tp = netdev_priv(dev);
10149 return tp->nvram_size;
10152 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10154 struct tg3 *tp = netdev_priv(dev);
10157 u32 i, offset, len, b_offset, b_count;
10160 if (tg3_flag(tp, NO_NVRAM))
10163 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10166 offset = eeprom->offset;
10170 eeprom->magic = TG3_EEPROM_MAGIC;
10173 /* adjustments to start on required 4 byte boundary */
10174 b_offset = offset & 3;
10175 b_count = 4 - b_offset;
10176 if (b_count > len) {
10177 /* i.e. offset=1 len=2 */
10180 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10183 memcpy(data, ((char *)&val) + b_offset, b_count);
10186 eeprom->len += b_count;
10189 /* read bytes up to the last 4 byte boundary */
10190 pd = &data[eeprom->len];
10191 for (i = 0; i < (len - (len & 3)); i += 4) {
10192 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10197 memcpy(pd + i, &val, 4);
10202 /* read last bytes not ending on 4 byte boundary */
10203 pd = &data[eeprom->len];
10205 b_offset = offset + len - b_count;
10206 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10209 memcpy(pd, &val, b_count);
10210 eeprom->len += b_count;
10215 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10217 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10219 struct tg3 *tp = netdev_priv(dev);
10221 u32 offset, len, b_offset, odd_len;
10225 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10228 if (tg3_flag(tp, NO_NVRAM) ||
10229 eeprom->magic != TG3_EEPROM_MAGIC)
10232 offset = eeprom->offset;
10235 if ((b_offset = (offset & 3))) {
10236 /* adjustments to start on required 4 byte boundary */
10237 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10248 /* adjustments to end on required 4 byte boundary */
10250 len = (len + 3) & ~3;
10251 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10257 if (b_offset || odd_len) {
10258 buf = kmalloc(len, GFP_KERNEL);
10262 memcpy(buf, &start, 4);
10264 memcpy(buf+len-4, &end, 4);
10265 memcpy(buf + b_offset, data, eeprom->len);
10268 ret = tg3_nvram_write_block(tp, offset, len, buf);
10276 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10278 struct tg3 *tp = netdev_priv(dev);
10280 if (tg3_flag(tp, USE_PHYLIB)) {
10281 struct phy_device *phydev;
10282 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10284 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10285 return phy_ethtool_gset(phydev, cmd);
10288 cmd->supported = (SUPPORTED_Autoneg);
10290 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10291 cmd->supported |= (SUPPORTED_1000baseT_Half |
10292 SUPPORTED_1000baseT_Full);
10294 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10295 cmd->supported |= (SUPPORTED_100baseT_Half |
10296 SUPPORTED_100baseT_Full |
10297 SUPPORTED_10baseT_Half |
10298 SUPPORTED_10baseT_Full |
10300 cmd->port = PORT_TP;
10302 cmd->supported |= SUPPORTED_FIBRE;
10303 cmd->port = PORT_FIBRE;
10306 cmd->advertising = tp->link_config.advertising;
10307 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10308 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10309 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10310 cmd->advertising |= ADVERTISED_Pause;
10312 cmd->advertising |= ADVERTISED_Pause |
10313 ADVERTISED_Asym_Pause;
10315 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10316 cmd->advertising |= ADVERTISED_Asym_Pause;
10319 if (netif_running(dev) && netif_carrier_ok(dev)) {
10320 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10321 cmd->duplex = tp->link_config.active_duplex;
10322 cmd->lp_advertising = tp->link_config.rmt_adv;
10323 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10324 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10325 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10327 cmd->eth_tp_mdix = ETH_TP_MDI;
10330 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10331 cmd->duplex = DUPLEX_INVALID;
10332 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10334 cmd->phy_address = tp->phy_addr;
10335 cmd->transceiver = XCVR_INTERNAL;
10336 cmd->autoneg = tp->link_config.autoneg;
10342 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10344 struct tg3 *tp = netdev_priv(dev);
10345 u32 speed = ethtool_cmd_speed(cmd);
10347 if (tg3_flag(tp, USE_PHYLIB)) {
10348 struct phy_device *phydev;
10349 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10351 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10352 return phy_ethtool_sset(phydev, cmd);
10355 if (cmd->autoneg != AUTONEG_ENABLE &&
10356 cmd->autoneg != AUTONEG_DISABLE)
10359 if (cmd->autoneg == AUTONEG_DISABLE &&
10360 cmd->duplex != DUPLEX_FULL &&
10361 cmd->duplex != DUPLEX_HALF)
10364 if (cmd->autoneg == AUTONEG_ENABLE) {
10365 u32 mask = ADVERTISED_Autoneg |
10367 ADVERTISED_Asym_Pause;
10369 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10370 mask |= ADVERTISED_1000baseT_Half |
10371 ADVERTISED_1000baseT_Full;
10373 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10374 mask |= ADVERTISED_100baseT_Half |
10375 ADVERTISED_100baseT_Full |
10376 ADVERTISED_10baseT_Half |
10377 ADVERTISED_10baseT_Full |
10380 mask |= ADVERTISED_FIBRE;
10382 if (cmd->advertising & ~mask)
10385 mask &= (ADVERTISED_1000baseT_Half |
10386 ADVERTISED_1000baseT_Full |
10387 ADVERTISED_100baseT_Half |
10388 ADVERTISED_100baseT_Full |
10389 ADVERTISED_10baseT_Half |
10390 ADVERTISED_10baseT_Full);
10392 cmd->advertising &= mask;
10394 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10395 if (speed != SPEED_1000)
10398 if (cmd->duplex != DUPLEX_FULL)
10401 if (speed != SPEED_100 &&
10407 tg3_full_lock(tp, 0);
10409 tp->link_config.autoneg = cmd->autoneg;
10410 if (cmd->autoneg == AUTONEG_ENABLE) {
10411 tp->link_config.advertising = (cmd->advertising |
10412 ADVERTISED_Autoneg);
10413 tp->link_config.speed = SPEED_INVALID;
10414 tp->link_config.duplex = DUPLEX_INVALID;
10416 tp->link_config.advertising = 0;
10417 tp->link_config.speed = speed;
10418 tp->link_config.duplex = cmd->duplex;
10421 tp->link_config.orig_speed = tp->link_config.speed;
10422 tp->link_config.orig_duplex = tp->link_config.duplex;
10423 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10425 if (netif_running(dev))
10426 tg3_setup_phy(tp, 1);
10428 tg3_full_unlock(tp);
10433 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10435 struct tg3 *tp = netdev_priv(dev);
10437 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10438 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10439 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10440 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10443 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10445 struct tg3 *tp = netdev_priv(dev);
10447 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10448 wol->supported = WAKE_MAGIC;
10450 wol->supported = 0;
10452 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10453 wol->wolopts = WAKE_MAGIC;
10454 memset(&wol->sopass, 0, sizeof(wol->sopass));
10457 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10459 struct tg3 *tp = netdev_priv(dev);
10460 struct device *dp = &tp->pdev->dev;
10462 if (wol->wolopts & ~WAKE_MAGIC)
10464 if ((wol->wolopts & WAKE_MAGIC) &&
10465 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10468 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10470 spin_lock_bh(&tp->lock);
10471 if (device_may_wakeup(dp))
10472 tg3_flag_set(tp, WOL_ENABLE);
10474 tg3_flag_clear(tp, WOL_ENABLE);
10475 spin_unlock_bh(&tp->lock);
10480 static u32 tg3_get_msglevel(struct net_device *dev)
10482 struct tg3 *tp = netdev_priv(dev);
10483 return tp->msg_enable;
10486 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10488 struct tg3 *tp = netdev_priv(dev);
10489 tp->msg_enable = value;
10492 static int tg3_nway_reset(struct net_device *dev)
10494 struct tg3 *tp = netdev_priv(dev);
10497 if (!netif_running(dev))
10500 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10503 if (tg3_flag(tp, USE_PHYLIB)) {
10504 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10506 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10510 spin_lock_bh(&tp->lock);
10512 tg3_readphy(tp, MII_BMCR, &bmcr);
10513 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10514 ((bmcr & BMCR_ANENABLE) ||
10515 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10516 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10520 spin_unlock_bh(&tp->lock);
10526 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10528 struct tg3 *tp = netdev_priv(dev);
10530 ering->rx_max_pending = tp->rx_std_ring_mask;
10531 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10532 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10534 ering->rx_jumbo_max_pending = 0;
10536 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10538 ering->rx_pending = tp->rx_pending;
10539 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10540 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10542 ering->rx_jumbo_pending = 0;
10544 ering->tx_pending = tp->napi[0].tx_pending;
10547 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10549 struct tg3 *tp = netdev_priv(dev);
10550 int i, irq_sync = 0, err = 0;
10552 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10553 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10554 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10555 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10556 (tg3_flag(tp, TSO_BUG) &&
10557 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10560 if (netif_running(dev)) {
10562 tg3_netif_stop(tp);
10566 tg3_full_lock(tp, irq_sync);
10568 tp->rx_pending = ering->rx_pending;
10570 if (tg3_flag(tp, MAX_RXPEND_64) &&
10571 tp->rx_pending > 63)
10572 tp->rx_pending = 63;
10573 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10575 for (i = 0; i < tp->irq_max; i++)
10576 tp->napi[i].tx_pending = ering->tx_pending;
10578 if (netif_running(dev)) {
10579 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10580 err = tg3_restart_hw(tp, 1);
10582 tg3_netif_start(tp);
10585 tg3_full_unlock(tp);
10587 if (irq_sync && !err)
10593 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10595 struct tg3 *tp = netdev_priv(dev);
10597 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10599 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10600 epause->rx_pause = 1;
10602 epause->rx_pause = 0;
10604 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10605 epause->tx_pause = 1;
10607 epause->tx_pause = 0;
10610 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10612 struct tg3 *tp = netdev_priv(dev);
10615 if (tg3_flag(tp, USE_PHYLIB)) {
10617 struct phy_device *phydev;
10619 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10621 if (!(phydev->supported & SUPPORTED_Pause) ||
10622 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10623 (epause->rx_pause != epause->tx_pause)))
10626 tp->link_config.flowctrl = 0;
10627 if (epause->rx_pause) {
10628 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10630 if (epause->tx_pause) {
10631 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10632 newadv = ADVERTISED_Pause;
10634 newadv = ADVERTISED_Pause |
10635 ADVERTISED_Asym_Pause;
10636 } else if (epause->tx_pause) {
10637 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10638 newadv = ADVERTISED_Asym_Pause;
10642 if (epause->autoneg)
10643 tg3_flag_set(tp, PAUSE_AUTONEG);
10645 tg3_flag_clear(tp, PAUSE_AUTONEG);
10647 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10648 u32 oldadv = phydev->advertising &
10649 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10650 if (oldadv != newadv) {
10651 phydev->advertising &=
10652 ~(ADVERTISED_Pause |
10653 ADVERTISED_Asym_Pause);
10654 phydev->advertising |= newadv;
10655 if (phydev->autoneg) {
10657 * Always renegotiate the link to
10658 * inform our link partner of our
10659 * flow control settings, even if the
10660 * flow control is forced. Let
10661 * tg3_adjust_link() do the final
10662 * flow control setup.
10664 return phy_start_aneg(phydev);
10668 if (!epause->autoneg)
10669 tg3_setup_flow_control(tp, 0, 0);
10671 tp->link_config.orig_advertising &=
10672 ~(ADVERTISED_Pause |
10673 ADVERTISED_Asym_Pause);
10674 tp->link_config.orig_advertising |= newadv;
10679 if (netif_running(dev)) {
10680 tg3_netif_stop(tp);
10684 tg3_full_lock(tp, irq_sync);
10686 if (epause->autoneg)
10687 tg3_flag_set(tp, PAUSE_AUTONEG);
10689 tg3_flag_clear(tp, PAUSE_AUTONEG);
10690 if (epause->rx_pause)
10691 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10693 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10694 if (epause->tx_pause)
10695 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10697 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10699 if (netif_running(dev)) {
10700 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10701 err = tg3_restart_hw(tp, 1);
10703 tg3_netif_start(tp);
10706 tg3_full_unlock(tp);
10712 static int tg3_get_sset_count(struct net_device *dev, int sset)
10716 return TG3_NUM_TEST;
10718 return TG3_NUM_STATS;
10720 return -EOPNOTSUPP;
10724 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10726 switch (stringset) {
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10734 WARN_ON(1); /* we need a WARN() */
10739 static int tg3_set_phys_id(struct net_device *dev,
10740 enum ethtool_phys_id_state state)
10742 struct tg3 *tp = netdev_priv(dev);
10744 if (!netif_running(tp->dev))
10748 case ETHTOOL_ID_ACTIVE:
10749 return 1; /* cycle on/off once per second */
10751 case ETHTOOL_ID_ON:
10752 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10753 LED_CTRL_1000MBPS_ON |
10754 LED_CTRL_100MBPS_ON |
10755 LED_CTRL_10MBPS_ON |
10756 LED_CTRL_TRAFFIC_OVERRIDE |
10757 LED_CTRL_TRAFFIC_BLINK |
10758 LED_CTRL_TRAFFIC_LED);
10761 case ETHTOOL_ID_OFF:
10762 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10763 LED_CTRL_TRAFFIC_OVERRIDE);
10766 case ETHTOOL_ID_INACTIVE:
10767 tw32(MAC_LED_CTRL, tp->led_ctrl);
10774 static void tg3_get_ethtool_stats(struct net_device *dev,
10775 struct ethtool_stats *estats, u64 *tmp_stats)
10777 struct tg3 *tp = netdev_priv(dev);
10779 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10782 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10786 u32 offset = 0, len = 0;
10789 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10792 if (magic == TG3_EEPROM_MAGIC) {
10793 for (offset = TG3_NVM_DIR_START;
10794 offset < TG3_NVM_DIR_END;
10795 offset += TG3_NVM_DIRENT_SIZE) {
10796 if (tg3_nvram_read(tp, offset, &val))
10799 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10800 TG3_NVM_DIRTYPE_EXTVPD)
10804 if (offset != TG3_NVM_DIR_END) {
10805 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10806 if (tg3_nvram_read(tp, offset + 4, &offset))
10809 offset = tg3_nvram_logical_addr(tp, offset);
10813 if (!offset || !len) {
10814 offset = TG3_NVM_VPD_OFF;
10815 len = TG3_NVM_VPD_LEN;
10818 buf = kmalloc(len, GFP_KERNEL);
10822 if (magic == TG3_EEPROM_MAGIC) {
10823 for (i = 0; i < len; i += 4) {
10824 /* The data is in little-endian format in NVRAM.
10825 * Use the big-endian read routines to preserve
10826 * the byte order as it exists in NVRAM.
10828 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10834 unsigned int pos = 0;
10836 ptr = (u8 *)&buf[0];
10837 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10838 cnt = pci_read_vpd(tp->pdev, pos,
10840 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10858 #define NVRAM_TEST_SIZE 0x100
10859 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10860 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10861 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10862 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10863 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10864 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10865 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10866 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10868 static int tg3_test_nvram(struct tg3 *tp)
10870 u32 csum, magic, len;
10872 int i, j, k, err = 0, size;
10874 if (tg3_flag(tp, NO_NVRAM))
10877 if (tg3_nvram_read(tp, 0, &magic) != 0)
10880 if (magic == TG3_EEPROM_MAGIC)
10881 size = NVRAM_TEST_SIZE;
10882 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10883 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10884 TG3_EEPROM_SB_FORMAT_1) {
10885 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10886 case TG3_EEPROM_SB_REVISION_0:
10887 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10889 case TG3_EEPROM_SB_REVISION_2:
10890 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10892 case TG3_EEPROM_SB_REVISION_3:
10893 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10895 case TG3_EEPROM_SB_REVISION_4:
10896 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10898 case TG3_EEPROM_SB_REVISION_5:
10899 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10901 case TG3_EEPROM_SB_REVISION_6:
10902 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10909 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10910 size = NVRAM_SELFBOOT_HW_SIZE;
10914 buf = kmalloc(size, GFP_KERNEL);
10919 for (i = 0, j = 0; i < size; i += 4, j++) {
10920 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10927 /* Selfboot format */
10928 magic = be32_to_cpu(buf[0]);
10929 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10930 TG3_EEPROM_MAGIC_FW) {
10931 u8 *buf8 = (u8 *) buf, csum8 = 0;
10933 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10934 TG3_EEPROM_SB_REVISION_2) {
10935 /* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}
10954 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10955 TG3_EEPROM_MAGIC_HW) {
10956 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10957 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10958 u8 *buf8 = (u8 *) buf;
10960 /* Separate the parity bits and the data bytes. */
10961 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10962 if ((i == 0) || (i == 8)) {
10966 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10967 parity[k++] = buf8[i] & msk;
10969 } else if (i == 16) {
10973 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10974 parity[k++] = buf8[i] & msk;
10977 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10978 parity[k++] = buf8[i] & msk;
10981 data[j++] = buf8[i];
10985 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10986 u8 hw8 = hweight8(data[i]);
10988 if ((hw8 & 0x1) && parity[i])
10990 else if (!(hw8 & 0x1) && !parity[i])
10999 /* Bootstrap checksum at offset 0x10 */
11000 csum = calc_crc((unsigned char *) buf, 0x10);
11001 if (csum != le32_to_cpu(buf[0x10/4]))
11004 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11005 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11006 if (csum != le32_to_cpu(buf[0xfc/4]))
11011 buf = tg3_vpd_readblock(tp, &len);
11015 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11017 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11021 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11024 i += PCI_VPD_LRDT_TAG_SIZE;
11025 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11026 PCI_VPD_RO_KEYWORD_CHKSUM);
11030 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11032 for (i = 0; i <= j; i++)
11033 csum8 += ((u8 *)buf)[i];
11047 #define TG3_SERDES_TIMEOUT_SEC 2
11048 #define TG3_COPPER_TIMEOUT_SEC 6
11050 static int tg3_test_link(struct tg3 *tp)
11054 if (!netif_running(tp->dev))
11057 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11058 max = TG3_SERDES_TIMEOUT_SEC;
11060 max = TG3_COPPER_TIMEOUT_SEC;
11062 for (i = 0; i < max; i++) {
11063 if (netif_carrier_ok(tp->dev))
11066 if (msleep_interruptible(1000))
11073 /* Only test the commonly used registers */
11074 static int tg3_test_registers(struct tg3 *tp)
11076 int i, is_5705, is_5750;
11077 u32 offset, read_mask, write_mask, val, save_val, read_val;
11081 #define TG3_FL_5705 0x1
11082 #define TG3_FL_NOT_5705 0x2
11083 #define TG3_FL_NOT_5788 0x4
11084 #define TG3_FL_NOT_5750 0x8
11088 /* MAC Control Registers */
11089 { MAC_MODE, TG3_FL_NOT_5705,
11090 0x00000000, 0x00ef6f8c },
11091 { MAC_MODE, TG3_FL_5705,
11092 0x00000000, 0x01ef6b8c },
11093 { MAC_STATUS, TG3_FL_NOT_5705,
11094 0x03800107, 0x00000000 },
11095 { MAC_STATUS, TG3_FL_5705,
11096 0x03800100, 0x00000000 },
11097 { MAC_ADDR_0_HIGH, 0x0000,
11098 0x00000000, 0x0000ffff },
11099 { MAC_ADDR_0_LOW, 0x0000,
11100 0x00000000, 0xffffffff },
11101 { MAC_RX_MTU_SIZE, 0x0000,
11102 0x00000000, 0x0000ffff },
11103 { MAC_TX_MODE, 0x0000,
11104 0x00000000, 0x00000070 },
11105 { MAC_TX_LENGTHS, 0x0000,
11106 0x00000000, 0x00003fff },
11107 { MAC_RX_MODE, TG3_FL_NOT_5705,
11108 0x00000000, 0x000007fc },
11109 { MAC_RX_MODE, TG3_FL_5705,
11110 0x00000000, 0x000007dc },
11111 { MAC_HASH_REG_0, 0x0000,
11112 0x00000000, 0xffffffff },
11113 { MAC_HASH_REG_1, 0x0000,
11114 0x00000000, 0xffffffff },
11115 { MAC_HASH_REG_2, 0x0000,
11116 0x00000000, 0xffffffff },
11117 { MAC_HASH_REG_3, 0x0000,
11118 0x00000000, 0xffffffff },
11120 /* Receive Data and Receive BD Initiator Control Registers. */
11121 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11122 0x00000000, 0xffffffff },
11123 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11124 0x00000000, 0xffffffff },
11125 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11126 0x00000000, 0x00000003 },
11127 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11128 0x00000000, 0xffffffff },
11129 { RCVDBDI_STD_BD+0, 0x0000,
11130 0x00000000, 0xffffffff },
11131 { RCVDBDI_STD_BD+4, 0x0000,
11132 0x00000000, 0xffffffff },
11133 { RCVDBDI_STD_BD+8, 0x0000,
11134 0x00000000, 0xffff0002 },
11135 { RCVDBDI_STD_BD+0xc, 0x0000,
11136 0x00000000, 0xffffffff },
11138 /* Receive BD Initiator Control Registers. */
11139 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11140 0x00000000, 0xffffffff },
11141 { RCVBDI_STD_THRESH, TG3_FL_5705,
11142 0x00000000, 0x000003ff },
11143 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11144 0x00000000, 0xffffffff },
11146 /* Host Coalescing Control Registers. */
11147 { HOSTCC_MODE, TG3_FL_NOT_5705,
11148 0x00000000, 0x00000004 },
11149 { HOSTCC_MODE, TG3_FL_5705,
11150 0x00000000, 0x000000f6 },
11151 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11152 0x00000000, 0xffffffff },
11153 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11154 0x00000000, 0x000003ff },
11155 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11156 0x00000000, 0xffffffff },
11157 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11158 0x00000000, 0x000003ff },
11159 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11160 0x00000000, 0xffffffff },
11161 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11162 0x00000000, 0x000000ff },
11163 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11164 0x00000000, 0xffffffff },
11165 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11166 0x00000000, 0x000000ff },
11167 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11168 0x00000000, 0xffffffff },
11169 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11170 0x00000000, 0xffffffff },
11171 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11172 0x00000000, 0xffffffff },
11173 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11174 0x00000000, 0x000000ff },
11175 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11176 0x00000000, 0xffffffff },
11177 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11178 0x00000000, 0x000000ff },
11179 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11180 0x00000000, 0xffffffff },
11181 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11182 0x00000000, 0xffffffff },
11183 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11184 0x00000000, 0xffffffff },
11185 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11186 0x00000000, 0xffffffff },
11187 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11188 0x00000000, 0xffffffff },
11189 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11190 0xffffffff, 0x00000000 },
11191 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11192 0xffffffff, 0x00000000 },
11194 /* Buffer Manager Control Registers. */
11195 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11196 0x00000000, 0x007fff80 },
11197 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11198 0x00000000, 0x007fffff },
11199 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11200 0x00000000, 0x0000003f },
11201 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11202 0x00000000, 0x000001ff },
11203 { BUFMGR_MB_HIGH_WATER, 0x0000,
11204 0x00000000, 0x000001ff },
11205 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11206 0xffffffff, 0x00000000 },
11207 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11208 0xffffffff, 0x00000000 },
11210 /* Mailbox Registers */
11211 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11212 0x00000000, 0x000001ff },
11213 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11214 0x00000000, 0x000001ff },
11215 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11216 0x00000000, 0x000007ff },
11217 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11218 0x00000000, 0x000001ff },
11220 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11223 is_5705 = is_5750 = 0;
11224 if (tg3_flag(tp, 5705_PLUS)) {
11226 if (tg3_flag(tp, 5750_PLUS))
11230 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11231 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11234 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11237 if (tg3_flag(tp, IS_5788) &&
11238 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11241 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11244 offset = (u32) reg_tbl[i].offset;
11245 read_mask = reg_tbl[i].read_mask;
11246 write_mask = reg_tbl[i].write_mask;
11248 /* Save the original register content */
11249 save_val = tr32(offset);
11251 /* Determine the read-only value. */
11252 read_val = save_val & read_mask;
11254 /* Write zero to the register, then make sure the read-only bits
11255 * are not changed and the read/write bits are all zeros.
11259 val = tr32(offset);
11261 /* Test the read-only and read/write bits. */
11262 if (((val & read_mask) != read_val) || (val & write_mask))
11265 /* Write ones to all the bits defined by RdMask and WrMask, then
11266 * make sure the read-only bits are not changed and the
11267 * read/write bits are all ones.
11269 tw32(offset, read_mask | write_mask);
11271 val = tr32(offset);
11273 /* Test the read-only bits. */
11274 if ((val & read_mask) != read_val)
11277 /* Test the read/write bits. */
11278 if ((val & write_mask) != write_mask)
11281 tw32(offset, save_val);
11287 if (netif_msg_hw(tp))
11288 netdev_err(tp->dev,
11289 "Register test failed at offset %x\n", offset);
11290 tw32(offset, save_val);
11294 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11296 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11300 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11301 for (j = 0; j < len; j += 4) {
11304 tg3_write_mem(tp, offset + j, test_pattern[i]);
11305 tg3_read_mem(tp, offset + j, &val);
11306 if (val != test_pattern[i])
11313 static int tg3_test_memory(struct tg3 *tp)
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
11319 { 0x00000000, 0x00b50},
11320 { 0x00002000, 0x1c000},
11321 { 0xffffffff, 0x00000}
11322 }, mem_tbl_5705[] = {
11323 { 0x00000100, 0x0000c},
11324 { 0x00000200, 0x00008},
11325 { 0x00004000, 0x00800},
11326 { 0x00006000, 0x01000},
11327 { 0x00008000, 0x02000},
11328 { 0x00010000, 0x0e000},
11329 { 0xffffffff, 0x00000}
11330 }, mem_tbl_5755[] = {
11331 { 0x00000200, 0x00008},
11332 { 0x00004000, 0x00800},
11333 { 0x00006000, 0x00800},
11334 { 0x00008000, 0x02000},
11335 { 0x00010000, 0x0c000},
11336 { 0xffffffff, 0x00000}
11337 }, mem_tbl_5906[] = {
11338 { 0x00000200, 0x00008},
11339 { 0x00004000, 0x00400},
11340 { 0x00006000, 0x00400},
11341 { 0x00008000, 0x01000},
11342 { 0x00010000, 0x01000},
11343 { 0xffffffff, 0x00000}
11344 }, mem_tbl_5717[] = {
11345 { 0x00000200, 0x00008},
11346 { 0x00010000, 0x0a000},
11347 { 0x00020000, 0x13c00},
11348 { 0xffffffff, 0x00000}
11349 }, mem_tbl_57765[] = {
11350 { 0x00000200, 0x00008},
11351 { 0x00004000, 0x00800},
11352 { 0x00006000, 0x09800},
11353 { 0x00010000, 0x0a000},
11354 { 0xffffffff, 0x00000}
11356 struct mem_entry *mem_tbl;
11360 if (tg3_flag(tp, 5717_PLUS))
11361 mem_tbl = mem_tbl_5717;
11362 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11363 mem_tbl = mem_tbl_57765;
11364 else if (tg3_flag(tp, 5755_PLUS))
11365 mem_tbl = mem_tbl_5755;
11366 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11367 mem_tbl = mem_tbl_5906;
11368 else if (tg3_flag(tp, 5705_PLUS))
11369 mem_tbl = mem_tbl_5705;
11371 mem_tbl = mem_tbl_570x;
11373 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11374 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11382 #define TG3_TSO_MSS 500
11384 #define TG3_TSO_IP_HDR_LEN 20
11385 #define TG3_TSO_TCP_HDR_LEN 20
11386 #define TG3_TSO_TCP_OPT_LEN 12
static const u8 tg3_tso_header[] = {
0x08, 0x00,
11390 0x45, 0x00, 0x00, 0x00,
11391 0x00, 0x00, 0x40, 0x00,
11392 0x40, 0x06, 0x00, 0x00,
11393 0x0a, 0x00, 0x00, 0x01,
11394 0x0a, 0x00, 0x00, 0x02,
11395 0x0d, 0x00, 0xe0, 0x00,
11396 0x00, 0x00, 0x01, 0x00,
11397 0x00, 0x00, 0x02, 0x00,
11398 0x80, 0x10, 0x10, 0x00,
11399 0x14, 0x09, 0x00, 0x00,
11400 0x01, 0x01, 0x08, 0x0a,
11401 0x11, 0x11, 0x11, 0x11,
11402 0x11, 0x11, 0x11, 0x11,
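/* Breakdown of the canned header above (descriptive note, not from the
 * original source): the array begins with the 0x0800 IPv4 ethertype,
 * followed by a 20 byte IPv4 header (version 4 / IHL 5 = 0x45, DF bit
 * set, TTL 64, protocol 6 = TCP, source 10.0.0.1, destination 10.0.0.2)
 * and a TCP header whose data offset nibble of 8 (0x80) gives a 32 byte
 * TCP header, i.e. 20 bytes plus the 12 byte option area -- matching
 * TG3_TSO_IP_HDR_LEN, TG3_TSO_TCP_HDR_LEN and TG3_TSO_TCP_OPT_LEN above.
 * The options are two NOPs plus a timestamp option padded with 0x11
 * filler bytes.
 */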
11405 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11407 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11408 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11410 struct sk_buff *skb;
11411 u8 *tx_data, *rx_data;
11413 int num_pkts, tx_len, rx_len, i, err;
11414 struct tg3_rx_buffer_desc *desc;
11415 struct tg3_napi *tnapi, *rnapi;
11416 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11418 tnapi = &tp->napi[0];
11419 rnapi = &tp->napi[0];
11420 if (tp->irq_cnt > 1) {
11421 if (tg3_flag(tp, ENABLE_RSS))
11422 rnapi = &tp->napi[1];
11423 if (tg3_flag(tp, ENABLE_TSS))
11424 tnapi = &tp->napi[1];
11426 coal_now = tnapi->coal_now | rnapi->coal_now;
11431 skb = netdev_alloc_skb(tp->dev, tx_len);
11435 tx_data = skb_put(skb, tx_len);
11436 memcpy(tx_data, tp->dev->dev_addr, 6);
11437 memset(tx_data + 6, 0x0, 8);
11439 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11441 if (tso_loopback) {
11442 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11444 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11445 TG3_TSO_TCP_OPT_LEN;
11447 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11448 sizeof(tg3_tso_header));
11451 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11452 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11454 /* Set the total length field in the IP header */
11455 iph->tot_len = htons((u16)(mss + hdr_len));
11457 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11458 TXD_FLAG_CPU_POST_DMA);
11460 if (tg3_flag(tp, HW_TSO_1) ||
11461 tg3_flag(tp, HW_TSO_2) ||
11462 tg3_flag(tp, HW_TSO_3)) {
11464 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11465 th = (struct tcphdr *)&tx_data[val];
11468 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11470 if (tg3_flag(tp, HW_TSO_3)) {
11471 mss |= (hdr_len & 0xc) << 12;
11472 if (hdr_len & 0x10)
11473 base_flags |= 0x00000010;
11474 base_flags |= (hdr_len & 0x3e0) << 5;
11475 } else if (tg3_flag(tp, HW_TSO_2))
11476 mss |= hdr_len << 9;
11477 else if (tg3_flag(tp, HW_TSO_1) ||
11478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11479 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11481 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11484 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11487 data_off = ETH_HLEN;
11490 for (i = data_off; i < tx_len; i++)
11491 tx_data[i] = (u8) (i & 0xff);
11493 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11494 if (pci_dma_mapping_error(tp->pdev, map)) {
11495 dev_kfree_skb(skb);
11499 val = tnapi->tx_prod;
11500 tnapi->tx_buffers[val].skb = skb;
11501 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11503 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11508 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11510 budget = tg3_tx_avail(tnapi);
11511 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11512 base_flags | TXD_FLAG_END, mss, 0)) {
11513 tnapi->tx_buffers[val].skb = NULL;
11514 dev_kfree_skb(skb);
11520 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11521 tr32_mailbox(tnapi->prodmbox);
11525 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11526 for (i = 0; i < 35; i++) {
11527 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11532 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11533 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11534 if ((tx_idx == tnapi->tx_prod) &&
11535 (rx_idx == (rx_start_idx + num_pkts)))
11539 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11540 dev_kfree_skb(skb);
11542 if (tx_idx != tnapi->tx_prod)
11545 if (rx_idx != rx_start_idx + num_pkts)
11549 while (rx_idx != rx_start_idx) {
11550 desc = &rnapi->rx_rcb[rx_start_idx++];
11551 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11552 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11554 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11555 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11558 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11561 if (!tso_loopback) {
11562 if (rx_len != tx_len)
11565 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11566 if (opaque_key != RXD_OPAQUE_RING_STD)
11569 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11572 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11573 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11574 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11578 if (opaque_key == RXD_OPAQUE_RING_STD) {
11579 rx_data = tpr->rx_std_buffers[desc_idx].data;
11580 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11582 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11583 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11584 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11589 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11590 PCI_DMA_FROMDEVICE);
11592 rx_data += TG3_RX_OFFSET(tp);
11593 for (i = data_off; i < rx_len; i++, val++) {
11594 if (*(rx_data + i) != (u8) (val & 0xff))
11601 /* tg3_free_rings will unmap and free the rx_data */
11606 #define TG3_STD_LOOPBACK_FAILED 1
11607 #define TG3_JMB_LOOPBACK_FAILED 2
11608 #define TG3_TSO_LOOPBACK_FAILED 4
11609 #define TG3_LOOPBACK_FAILED \
11610 (TG3_STD_LOOPBACK_FAILED | \
11611 TG3_JMB_LOOPBACK_FAILED | \
11612 TG3_TSO_LOOPBACK_FAILED)
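/* Run the standard, TSO and jumbo loopback variants over MAC loopback,
 * internal PHY loopback and (optionally) external loopback, accumulating
 * the per-mode failure bits above in data[0..2].
 */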
11614 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11619 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11620 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11622 if (!netif_running(tp->dev)) {
11623 data[0] = TG3_LOOPBACK_FAILED;
11624 data[1] = TG3_LOOPBACK_FAILED;
11626 data[2] = TG3_LOOPBACK_FAILED;
11630 err = tg3_reset_hw(tp, 1);
11632 data[0] = TG3_LOOPBACK_FAILED;
11633 data[1] = TG3_LOOPBACK_FAILED;
11635 data[2] = TG3_LOOPBACK_FAILED;
11639 if (tg3_flag(tp, ENABLE_RSS)) {
11642 /* Reroute all rx packets to the 1st queue */
11643 for (i = MAC_RSS_INDIR_TBL_0;
11644 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11648 /* HW errata - mac loopback fails in some cases on 5780.
11649 * Normal traffic and PHY loopback are not affected by
11650 * the errata.  Also, the MAC loopback test is deprecated for
11651 * all newer ASIC revisions.
11653 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11654 !tg3_flag(tp, CPMU_PRESENT)) {
11655 tg3_mac_loopback(tp, true);
11657 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11658 data[0] |= TG3_STD_LOOPBACK_FAILED;
11660 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11661 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11662 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11664 tg3_mac_loopback(tp, false);
11667 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11668 !tg3_flag(tp, USE_PHYLIB)) {
11671 tg3_phy_lpbk_set(tp, 0, false);
11673 /* Wait for link */
11674 for (i = 0; i < 100; i++) {
11675 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11680 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11681 data[1] |= TG3_STD_LOOPBACK_FAILED;
11682 if (tg3_flag(tp, TSO_CAPABLE) &&
11683 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11684 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11685 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11686 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11687 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11690 tg3_phy_lpbk_set(tp, 0, true);
11692 /* All link indications report up, but the hardware
11693 * isn't really ready for about 20 msec.  Double it to be sure.
11698 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11699 data[2] |= TG3_STD_LOOPBACK_FAILED;
11700 if (tg3_flag(tp, TSO_CAPABLE) &&
11701 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11702 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11703 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11704 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11705 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11708 /* Re-enable gphy autopowerdown. */
11709 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11710 tg3_phy_toggle_apd(tp, true);
11713 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11716 tp->phy_flags |= eee_cap;
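/* ethtool self-test entry point: runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, then restores normal operation (restarting
 * the hardware if the interface was running).
 */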
11721 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11724 struct tg3 *tp = netdev_priv(dev);
11725 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11727 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11728 tg3_power_up(tp)) {
11729 etest->flags |= ETH_TEST_FL_FAILED;
11730 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11734 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11736 if (tg3_test_nvram(tp) != 0) {
11737 etest->flags |= ETH_TEST_FL_FAILED;
11740 if (!doextlpbk && tg3_test_link(tp)) {
11741 etest->flags |= ETH_TEST_FL_FAILED;
11744 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11745 int err, err2 = 0, irq_sync = 0;
11747 if (netif_running(dev)) {
11749 tg3_netif_stop(tp);
11753 tg3_full_lock(tp, irq_sync);
11755 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11756 err = tg3_nvram_lock(tp);
11757 tg3_halt_cpu(tp, RX_CPU_BASE);
11758 if (!tg3_flag(tp, 5705_PLUS))
11759 tg3_halt_cpu(tp, TX_CPU_BASE);
11761 tg3_nvram_unlock(tp);
11763 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11766 if (tg3_test_registers(tp) != 0) {
11767 etest->flags |= ETH_TEST_FL_FAILED;
11771 if (tg3_test_memory(tp) != 0) {
11772 etest->flags |= ETH_TEST_FL_FAILED;
11777 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11779 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11780 etest->flags |= ETH_TEST_FL_FAILED;
11782 tg3_full_unlock(tp);
11784 if (tg3_test_interrupt(tp) != 0) {
11785 etest->flags |= ETH_TEST_FL_FAILED;
11789 tg3_full_lock(tp, 0);
11791 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11792 if (netif_running(dev)) {
11793 tg3_flag_set(tp, INIT_COMPLETE);
11794 err2 = tg3_restart_hw(tp, 1);
11796 tg3_netif_start(tp);
11799 tg3_full_unlock(tp);
11801 if (irq_sync && !err2)
11804 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11805 tg3_power_down(tp);
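/* MII ioctl handler.  When phylib drives the PHY the request is passed to
 * phy_mii_ioctl(); otherwise the standard MII register read/write ioctls
 * are serviced directly under tp->lock.
 */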
11809 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11811 struct mii_ioctl_data *data = if_mii(ifr);
11812 struct tg3 *tp = netdev_priv(dev);
11815 if (tg3_flag(tp, USE_PHYLIB)) {
11816 struct phy_device *phydev;
11817 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11819 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11820 return phy_mii_ioctl(phydev, ifr, cmd);
11825 data->phy_id = tp->phy_addr;
11828 case SIOCGMIIREG: {
11831 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11832 break; /* We have no PHY */
11834 if (!netif_running(dev))
11837 spin_lock_bh(&tp->lock);
11838 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11839 spin_unlock_bh(&tp->lock);
11841 data->val_out = mii_regval;
11847 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11848 break; /* We have no PHY */
11850 if (!netif_running(dev))
11853 spin_lock_bh(&tp->lock);
11854 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11855 spin_unlock_bh(&tp->lock);
11863 return -EOPNOTSUPP;
11866 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11868 struct tg3 *tp = netdev_priv(dev);
11870 memcpy(ec, &tp->coal, sizeof(*ec));
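/* Validate the requested interrupt coalescing parameters against the
 * chip's limits, store them in tp->coal and, if the device is running,
 * program them into the hardware.
 */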
11874 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11876 struct tg3 *tp = netdev_priv(dev);
11877 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11878 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11880 if (!tg3_flag(tp, 5705_PLUS)) {
11881 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11882 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11883 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11884 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11887 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11888 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11889 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11890 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11891 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11892 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11893 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11894 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11895 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11896 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11899 /* No rx interrupts will be generated if both are zero */
11900 if ((ec->rx_coalesce_usecs == 0) &&
11901 (ec->rx_max_coalesced_frames == 0))
11904 /* No tx interrupts will be generated if both are zero */
11905 if ((ec->tx_coalesce_usecs == 0) &&
11906 (ec->tx_max_coalesced_frames == 0))
11909 /* Only copy relevant parameters, ignore all others. */
11910 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11911 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11912 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11913 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11914 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11915 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11916 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11917 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11918 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11920 if (netif_running(dev)) {
11921 tg3_full_lock(tp, 0);
11922 __tg3_set_coalesce(tp, &tp->coal);
11923 tg3_full_unlock(tp);
11928 static const struct ethtool_ops tg3_ethtool_ops = {
11929 .get_settings = tg3_get_settings,
11930 .set_settings = tg3_set_settings,
11931 .get_drvinfo = tg3_get_drvinfo,
11932 .get_regs_len = tg3_get_regs_len,
11933 .get_regs = tg3_get_regs,
11934 .get_wol = tg3_get_wol,
11935 .set_wol = tg3_set_wol,
11936 .get_msglevel = tg3_get_msglevel,
11937 .set_msglevel = tg3_set_msglevel,
11938 .nway_reset = tg3_nway_reset,
11939 .get_link = ethtool_op_get_link,
11940 .get_eeprom_len = tg3_get_eeprom_len,
11941 .get_eeprom = tg3_get_eeprom,
11942 .set_eeprom = tg3_set_eeprom,
11943 .get_ringparam = tg3_get_ringparam,
11944 .set_ringparam = tg3_set_ringparam,
11945 .get_pauseparam = tg3_get_pauseparam,
11946 .set_pauseparam = tg3_set_pauseparam,
11947 .self_test = tg3_self_test,
11948 .get_strings = tg3_get_strings,
11949 .set_phys_id = tg3_set_phys_id,
11950 .get_ethtool_stats = tg3_get_ethtool_stats,
11951 .get_coalesce = tg3_get_coalesce,
11952 .set_coalesce = tg3_set_coalesce,
11953 .get_sset_count = tg3_get_sset_count,
11956 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11958 u32 cursize, val, magic;
11960 tp->nvram_size = EEPROM_CHIP_SIZE;
11962 if (tg3_nvram_read(tp, 0, &magic) != 0)
11965 if ((magic != TG3_EEPROM_MAGIC) &&
11966 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11967 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11971 * Size the chip by reading offsets at increasing powers of two.
11972 * When we encounter our validation signature, we know the addressing
11973 * has wrapped around, and thus have our chip size.
11977 while (cursize < tp->nvram_size) {
11978 if (tg3_nvram_read(tp, cursize, &val) != 0)
11987 tp->nvram_size = cursize;
11990 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11994 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11997 /* Selfboot format */
11998 if (val != TG3_EEPROM_MAGIC) {
11999 tg3_get_eeprom_size(tp);
12003 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12005 /* This is confusing. We want to operate on the
12006 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12007 * call will read from NVRAM and byteswap the data
12008 * according to the byteswapping settings for all
12009 * other register accesses. This ensures the data we
12010 * want will always reside in the lower 16-bits.
12011 * However, the data in NVRAM is in LE format, which
12012 * means the data from the NVRAM read will always be
12013 * opposite the endianness of the CPU. The 16-bit
12014 * byteswap then brings the data to CPU endianness.
12016 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12020 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
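/* Decode NVRAM_CFG1 on 5750/5780-class chips to determine the flash
 * vendor, page size and whether the part is buffered.
 */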
12023 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12027 nvcfg1 = tr32(NVRAM_CFG1);
12028 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12029 tg3_flag_set(tp, FLASH);
12031 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032 tw32(NVRAM_CFG1, nvcfg1);
12035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12036 tg3_flag(tp, 5780_CLASS)) {
12037 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12038 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12039 tp->nvram_jedecnum = JEDEC_ATMEL;
12040 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12041 tg3_flag_set(tp, NVRAM_BUFFERED);
12043 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12044 tp->nvram_jedecnum = JEDEC_ATMEL;
12045 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12047 case FLASH_VENDOR_ATMEL_EEPROM:
12048 tp->nvram_jedecnum = JEDEC_ATMEL;
12049 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12050 tg3_flag_set(tp, NVRAM_BUFFERED);
12052 case FLASH_VENDOR_ST:
12053 tp->nvram_jedecnum = JEDEC_ST;
12054 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12055 tg3_flag_set(tp, NVRAM_BUFFERED);
12057 case FLASH_VENDOR_SAIFUN:
12058 tp->nvram_jedecnum = JEDEC_SAIFUN;
12059 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12061 case FLASH_VENDOR_SST_SMALL:
12062 case FLASH_VENDOR_SST_LARGE:
12063 tp->nvram_jedecnum = JEDEC_SST;
12064 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12068 tp->nvram_jedecnum = JEDEC_ATMEL;
12069 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12070 tg3_flag_set(tp, NVRAM_BUFFERED);
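/* Translate the 5752-style page-size field of NVRAM_CFG1 into a byte
 * count in tp->nvram_pagesize (e.g. FLASH_5752PAGE_SIZE_264 -> 264).
 */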
12074 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12076 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12077 case FLASH_5752PAGE_SIZE_256:
12078 tp->nvram_pagesize = 256;
12080 case FLASH_5752PAGE_SIZE_512:
12081 tp->nvram_pagesize = 512;
12083 case FLASH_5752PAGE_SIZE_1K:
12084 tp->nvram_pagesize = 1024;
12086 case FLASH_5752PAGE_SIZE_2K:
12087 tp->nvram_pagesize = 2048;
12089 case FLASH_5752PAGE_SIZE_4K:
12090 tp->nvram_pagesize = 4096;
12092 case FLASH_5752PAGE_SIZE_264:
12093 tp->nvram_pagesize = 264;
12095 case FLASH_5752PAGE_SIZE_528:
12096 tp->nvram_pagesize = 528;
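/* The per-ASIC tg3_get_*_nvram_info() helpers below decode NVRAM_CFG1 for
 * each chip family, setting the JEDEC vendor, buffered/flash flags, page
 * size and, where the strapping allows it, the total NVRAM size.
 */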
12101 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12105 nvcfg1 = tr32(NVRAM_CFG1);
12107 /* NVRAM protection for TPM */
12108 if (nvcfg1 & (1 << 27))
12109 tg3_flag_set(tp, PROTECTED_NVRAM);
12111 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12112 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12113 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12114 tp->nvram_jedecnum = JEDEC_ATMEL;
12115 tg3_flag_set(tp, NVRAM_BUFFERED);
12117 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12118 tp->nvram_jedecnum = JEDEC_ATMEL;
12119 tg3_flag_set(tp, NVRAM_BUFFERED);
12120 tg3_flag_set(tp, FLASH);
12122 case FLASH_5752VENDOR_ST_M45PE10:
12123 case FLASH_5752VENDOR_ST_M45PE20:
12124 case FLASH_5752VENDOR_ST_M45PE40:
12125 tp->nvram_jedecnum = JEDEC_ST;
12126 tg3_flag_set(tp, NVRAM_BUFFERED);
12127 tg3_flag_set(tp, FLASH);
12131 if (tg3_flag(tp, FLASH)) {
12132 tg3_nvram_get_pagesize(tp, nvcfg1);
12134 /* For eeprom, set pagesize to maximum eeprom size */
12135 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12137 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12138 tw32(NVRAM_CFG1, nvcfg1);
12142 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12144 u32 nvcfg1, protect = 0;
12146 nvcfg1 = tr32(NVRAM_CFG1);
12148 /* NVRAM protection for TPM */
12149 if (nvcfg1 & (1 << 27)) {
12150 tg3_flag_set(tp, PROTECTED_NVRAM);
12154 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12156 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12157 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12158 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12159 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12160 tp->nvram_jedecnum = JEDEC_ATMEL;
12161 tg3_flag_set(tp, NVRAM_BUFFERED);
12162 tg3_flag_set(tp, FLASH);
12163 tp->nvram_pagesize = 264;
12164 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12165 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12166 tp->nvram_size = (protect ? 0x3e200 :
12167 TG3_NVRAM_SIZE_512KB);
12168 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12169 tp->nvram_size = (protect ? 0x1f200 :
12170 TG3_NVRAM_SIZE_256KB);
12172 tp->nvram_size = (protect ? 0x1f200 :
12173 TG3_NVRAM_SIZE_128KB);
12175 case FLASH_5752VENDOR_ST_M45PE10:
12176 case FLASH_5752VENDOR_ST_M45PE20:
12177 case FLASH_5752VENDOR_ST_M45PE40:
12178 tp->nvram_jedecnum = JEDEC_ST;
12179 tg3_flag_set(tp, NVRAM_BUFFERED);
12180 tg3_flag_set(tp, FLASH);
12181 tp->nvram_pagesize = 256;
12182 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12183 tp->nvram_size = (protect ?
12184 TG3_NVRAM_SIZE_64KB :
12185 TG3_NVRAM_SIZE_128KB);
12186 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12187 tp->nvram_size = (protect ?
12188 TG3_NVRAM_SIZE_64KB :
12189 TG3_NVRAM_SIZE_256KB);
12191 tp->nvram_size = (protect ?
12192 TG3_NVRAM_SIZE_128KB :
12193 TG3_NVRAM_SIZE_512KB);
12198 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12202 nvcfg1 = tr32(NVRAM_CFG1);
12204 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12205 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12206 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12207 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12208 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12209 tp->nvram_jedecnum = JEDEC_ATMEL;
12210 tg3_flag_set(tp, NVRAM_BUFFERED);
12211 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12213 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12214 tw32(NVRAM_CFG1, nvcfg1);
12216 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12217 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12218 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12219 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12220 tp->nvram_jedecnum = JEDEC_ATMEL;
12221 tg3_flag_set(tp, NVRAM_BUFFERED);
12222 tg3_flag_set(tp, FLASH);
12223 tp->nvram_pagesize = 264;
12225 case FLASH_5752VENDOR_ST_M45PE10:
12226 case FLASH_5752VENDOR_ST_M45PE20:
12227 case FLASH_5752VENDOR_ST_M45PE40:
12228 tp->nvram_jedecnum = JEDEC_ST;
12229 tg3_flag_set(tp, NVRAM_BUFFERED);
12230 tg3_flag_set(tp, FLASH);
12231 tp->nvram_pagesize = 256;
12236 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12238 u32 nvcfg1, protect = 0;
12240 nvcfg1 = tr32(NVRAM_CFG1);
12242 /* NVRAM protection for TPM */
12243 if (nvcfg1 & (1 << 27)) {
12244 tg3_flag_set(tp, PROTECTED_NVRAM);
12248 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12250 case FLASH_5761VENDOR_ATMEL_ADB021D:
12251 case FLASH_5761VENDOR_ATMEL_ADB041D:
12252 case FLASH_5761VENDOR_ATMEL_ADB081D:
12253 case FLASH_5761VENDOR_ATMEL_ADB161D:
12254 case FLASH_5761VENDOR_ATMEL_MDB021D:
12255 case FLASH_5761VENDOR_ATMEL_MDB041D:
12256 case FLASH_5761VENDOR_ATMEL_MDB081D:
12257 case FLASH_5761VENDOR_ATMEL_MDB161D:
12258 tp->nvram_jedecnum = JEDEC_ATMEL;
12259 tg3_flag_set(tp, NVRAM_BUFFERED);
12260 tg3_flag_set(tp, FLASH);
12261 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12262 tp->nvram_pagesize = 256;
12264 case FLASH_5761VENDOR_ST_A_M45PE20:
12265 case FLASH_5761VENDOR_ST_A_M45PE40:
12266 case FLASH_5761VENDOR_ST_A_M45PE80:
12267 case FLASH_5761VENDOR_ST_A_M45PE16:
12268 case FLASH_5761VENDOR_ST_M_M45PE20:
12269 case FLASH_5761VENDOR_ST_M_M45PE40:
12270 case FLASH_5761VENDOR_ST_M_M45PE80:
12271 case FLASH_5761VENDOR_ST_M_M45PE16:
12272 tp->nvram_jedecnum = JEDEC_ST;
12273 tg3_flag_set(tp, NVRAM_BUFFERED);
12274 tg3_flag_set(tp, FLASH);
12275 tp->nvram_pagesize = 256;
12280 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12283 case FLASH_5761VENDOR_ATMEL_ADB161D:
12284 case FLASH_5761VENDOR_ATMEL_MDB161D:
12285 case FLASH_5761VENDOR_ST_A_M45PE16:
12286 case FLASH_5761VENDOR_ST_M_M45PE16:
12287 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12289 case FLASH_5761VENDOR_ATMEL_ADB081D:
12290 case FLASH_5761VENDOR_ATMEL_MDB081D:
12291 case FLASH_5761VENDOR_ST_A_M45PE80:
12292 case FLASH_5761VENDOR_ST_M_M45PE80:
12293 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12295 case FLASH_5761VENDOR_ATMEL_ADB041D:
12296 case FLASH_5761VENDOR_ATMEL_MDB041D:
12297 case FLASH_5761VENDOR_ST_A_M45PE40:
12298 case FLASH_5761VENDOR_ST_M_M45PE40:
12299 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12301 case FLASH_5761VENDOR_ATMEL_ADB021D:
12302 case FLASH_5761VENDOR_ATMEL_MDB021D:
12303 case FLASH_5761VENDOR_ST_A_M45PE20:
12304 case FLASH_5761VENDOR_ST_M_M45PE20:
12305 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12311 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12313 tp->nvram_jedecnum = JEDEC_ATMEL;
12314 tg3_flag_set(tp, NVRAM_BUFFERED);
12315 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12318 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12322 nvcfg1 = tr32(NVRAM_CFG1);
12324 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12325 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12326 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12327 tp->nvram_jedecnum = JEDEC_ATMEL;
12328 tg3_flag_set(tp, NVRAM_BUFFERED);
12329 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12331 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12332 tw32(NVRAM_CFG1, nvcfg1);
12334 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12335 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12336 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12337 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12338 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12339 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12340 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12341 tp->nvram_jedecnum = JEDEC_ATMEL;
12342 tg3_flag_set(tp, NVRAM_BUFFERED);
12343 tg3_flag_set(tp, FLASH);
12345 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12346 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12347 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12348 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12349 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12351 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12352 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12353 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12355 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12356 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12357 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12361 case FLASH_5752VENDOR_ST_M45PE10:
12362 case FLASH_5752VENDOR_ST_M45PE20:
12363 case FLASH_5752VENDOR_ST_M45PE40:
12364 tp->nvram_jedecnum = JEDEC_ST;
12365 tg3_flag_set(tp, NVRAM_BUFFERED);
12366 tg3_flag_set(tp, FLASH);
12368 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12369 case FLASH_5752VENDOR_ST_M45PE10:
12370 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12372 case FLASH_5752VENDOR_ST_M45PE20:
12373 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12375 case FLASH_5752VENDOR_ST_M45PE40:
12376 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12381 tg3_flag_set(tp, NO_NVRAM);
12385 tg3_nvram_get_pagesize(tp, nvcfg1);
12386 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12387 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12391 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12395 nvcfg1 = tr32(NVRAM_CFG1);
12397 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12398 case FLASH_5717VENDOR_ATMEL_EEPROM:
12399 case FLASH_5717VENDOR_MICRO_EEPROM:
12400 tp->nvram_jedecnum = JEDEC_ATMEL;
12401 tg3_flag_set(tp, NVRAM_BUFFERED);
12402 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12404 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12405 tw32(NVRAM_CFG1, nvcfg1);
12407 case FLASH_5717VENDOR_ATMEL_MDB011D:
12408 case FLASH_5717VENDOR_ATMEL_ADB011B:
12409 case FLASH_5717VENDOR_ATMEL_ADB011D:
12410 case FLASH_5717VENDOR_ATMEL_MDB021D:
12411 case FLASH_5717VENDOR_ATMEL_ADB021B:
12412 case FLASH_5717VENDOR_ATMEL_ADB021D:
12413 case FLASH_5717VENDOR_ATMEL_45USPT:
12414 tp->nvram_jedecnum = JEDEC_ATMEL;
12415 tg3_flag_set(tp, NVRAM_BUFFERED);
12416 tg3_flag_set(tp, FLASH);
12418 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12419 case FLASH_5717VENDOR_ATMEL_MDB021D:
12420 /* Detect size with tg3_get_nvram_size() */
12422 case FLASH_5717VENDOR_ATMEL_ADB021B:
12423 case FLASH_5717VENDOR_ATMEL_ADB021D:
12424 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12427 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12431 case FLASH_5717VENDOR_ST_M_M25PE10:
12432 case FLASH_5717VENDOR_ST_A_M25PE10:
12433 case FLASH_5717VENDOR_ST_M_M45PE10:
12434 case FLASH_5717VENDOR_ST_A_M45PE10:
12435 case FLASH_5717VENDOR_ST_M_M25PE20:
12436 case FLASH_5717VENDOR_ST_A_M25PE20:
12437 case FLASH_5717VENDOR_ST_M_M45PE20:
12438 case FLASH_5717VENDOR_ST_A_M45PE20:
12439 case FLASH_5717VENDOR_ST_25USPT:
12440 case FLASH_5717VENDOR_ST_45USPT:
12441 tp->nvram_jedecnum = JEDEC_ST;
12442 tg3_flag_set(tp, NVRAM_BUFFERED);
12443 tg3_flag_set(tp, FLASH);
12445 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12446 case FLASH_5717VENDOR_ST_M_M25PE20:
12447 case FLASH_5717VENDOR_ST_M_M45PE20:
12448 /* Detect size with tg3_get_nvram_size() */
12450 case FLASH_5717VENDOR_ST_A_M25PE20:
12451 case FLASH_5717VENDOR_ST_A_M45PE20:
12452 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12455 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12460 tg3_flag_set(tp, NO_NVRAM);
12464 tg3_nvram_get_pagesize(tp, nvcfg1);
12465 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12466 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12469 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12471 u32 nvcfg1, nvmpinstrp;
12473 nvcfg1 = tr32(NVRAM_CFG1);
12474 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12476 switch (nvmpinstrp) {
12477 case FLASH_5720_EEPROM_HD:
12478 case FLASH_5720_EEPROM_LD:
12479 tp->nvram_jedecnum = JEDEC_ATMEL;
12480 tg3_flag_set(tp, NVRAM_BUFFERED);
12482 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12483 tw32(NVRAM_CFG1, nvcfg1);
12484 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12485 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12487 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12489 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12490 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12491 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12492 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12493 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12494 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12495 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12496 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12497 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12498 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12499 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12500 case FLASH_5720VENDOR_ATMEL_45USPT:
12501 tp->nvram_jedecnum = JEDEC_ATMEL;
12502 tg3_flag_set(tp, NVRAM_BUFFERED);
12503 tg3_flag_set(tp, FLASH);
12505 switch (nvmpinstrp) {
12506 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12507 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12508 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12509 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12511 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12512 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12513 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12514 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12516 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12517 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12518 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12521 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12525 case FLASH_5720VENDOR_M_ST_M25PE10:
12526 case FLASH_5720VENDOR_M_ST_M45PE10:
12527 case FLASH_5720VENDOR_A_ST_M25PE10:
12528 case FLASH_5720VENDOR_A_ST_M45PE10:
12529 case FLASH_5720VENDOR_M_ST_M25PE20:
12530 case FLASH_5720VENDOR_M_ST_M45PE20:
12531 case FLASH_5720VENDOR_A_ST_M25PE20:
12532 case FLASH_5720VENDOR_A_ST_M45PE20:
12533 case FLASH_5720VENDOR_M_ST_M25PE40:
12534 case FLASH_5720VENDOR_M_ST_M45PE40:
12535 case FLASH_5720VENDOR_A_ST_M25PE40:
12536 case FLASH_5720VENDOR_A_ST_M45PE40:
12537 case FLASH_5720VENDOR_M_ST_M25PE80:
12538 case FLASH_5720VENDOR_M_ST_M45PE80:
12539 case FLASH_5720VENDOR_A_ST_M25PE80:
12540 case FLASH_5720VENDOR_A_ST_M45PE80:
12541 case FLASH_5720VENDOR_ST_25USPT:
12542 case FLASH_5720VENDOR_ST_45USPT:
12543 tp->nvram_jedecnum = JEDEC_ST;
12544 tg3_flag_set(tp, NVRAM_BUFFERED);
12545 tg3_flag_set(tp, FLASH);
12547 switch (nvmpinstrp) {
12548 case FLASH_5720VENDOR_M_ST_M25PE20:
12549 case FLASH_5720VENDOR_M_ST_M45PE20:
12550 case FLASH_5720VENDOR_A_ST_M25PE20:
12551 case FLASH_5720VENDOR_A_ST_M45PE20:
12552 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12554 case FLASH_5720VENDOR_M_ST_M25PE40:
12555 case FLASH_5720VENDOR_M_ST_M45PE40:
12556 case FLASH_5720VENDOR_A_ST_M25PE40:
12557 case FLASH_5720VENDOR_A_ST_M45PE40:
12558 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12560 case FLASH_5720VENDOR_M_ST_M25PE80:
12561 case FLASH_5720VENDOR_M_ST_M45PE80:
12562 case FLASH_5720VENDOR_A_ST_M25PE80:
12563 case FLASH_5720VENDOR_A_ST_M45PE80:
12564 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12567 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12572 tg3_flag_set(tp, NO_NVRAM);
12576 tg3_nvram_get_pagesize(tp, nvcfg1);
12577 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12578 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12581 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12582 static void __devinit tg3_nvram_init(struct tg3 *tp)
12584 tw32_f(GRC_EEPROM_ADDR,
12585 (EEPROM_ADDR_FSM_RESET |
12586 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12587 EEPROM_ADDR_CLKPERD_SHIFT)));
12591 /* Enable seeprom accesses. */
12592 tw32_f(GRC_LOCAL_CTRL,
12593 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12596 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12597 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12598 tg3_flag_set(tp, NVRAM);
12600 if (tg3_nvram_lock(tp)) {
12601 netdev_warn(tp->dev,
12602 "Cannot get nvram lock, %s failed\n",
12606 tg3_enable_nvram_access(tp);
12608 tp->nvram_size = 0;
12610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12611 tg3_get_5752_nvram_info(tp);
12612 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12613 tg3_get_5755_nvram_info(tp);
12614 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12617 tg3_get_5787_nvram_info(tp);
12618 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12619 tg3_get_5761_nvram_info(tp);
12620 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12621 tg3_get_5906_nvram_info(tp);
12622 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12624 tg3_get_57780_nvram_info(tp);
12625 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12627 tg3_get_5717_nvram_info(tp);
12628 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12629 tg3_get_5720_nvram_info(tp);
12631 tg3_get_nvram_info(tp);
12633 if (tp->nvram_size == 0)
12634 tg3_get_nvram_size(tp);
12636 tg3_disable_nvram_access(tp);
12637 tg3_nvram_unlock(tp);
12640 tg3_flag_clear(tp, NVRAM);
12641 tg3_flag_clear(tp, NVRAM_BUFFERED);
12643 tg3_get_eeprom_size(tp);
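/* Write a dword-aligned buffer to the legacy SEEPROM one 32-bit word at a
 * time, polling GRC_EEPROM_ADDR for EEPROM_ADDR_COMPLETE after each word.
 */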
12647 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12648 u32 offset, u32 len, u8 *buf)
12653 for (i = 0; i < len; i += 4) {
12659 memcpy(&data, buf + i, 4);
12662 * The SEEPROM interface expects the data to always be opposite
12663 * the native endian format. We accomplish this by reversing
12664 * all the operations that would have been performed on the
12665 * data from a call to tg3_nvram_read_be32().
12667 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12669 val = tr32(GRC_EEPROM_ADDR);
12670 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12672 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12674 tw32(GRC_EEPROM_ADDR, val |
12675 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12676 (addr & EEPROM_ADDR_ADDR_MASK) |
12677 EEPROM_ADDR_START |
12678 EEPROM_ADDR_WRITE);
12680 for (j = 0; j < 1000; j++) {
12681 val = tr32(GRC_EEPROM_ADDR);
12683 if (val & EEPROM_ADDR_COMPLETE)
12687 if (!(val & EEPROM_ADDR_COMPLETE)) {
12696 /* offset and length are dword aligned */
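/* Unbuffered flash parts must be written a whole page at a time: read the
 * page into a bounce buffer, merge in the new data, erase the page and
 * then write the merged page back word by word.
 */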
12697 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12701 u32 pagesize = tp->nvram_pagesize;
12702 u32 pagemask = pagesize - 1;
12706 tmp = kmalloc(pagesize, GFP_KERNEL);
12712 u32 phy_addr, page_off, size;
12714 phy_addr = offset & ~pagemask;
12716 for (j = 0; j < pagesize; j += 4) {
12717 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12718 (__be32 *) (tmp + j));
12725 page_off = offset & pagemask;
12732 memcpy(tmp + page_off, buf, size);
12734 offset = offset + (pagesize - page_off);
12736 tg3_enable_nvram_access(tp);
12739 * Before we can erase the flash page, we need
12740 * to issue a special "write enable" command.
12742 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12744 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12747 /* Erase the target page */
12748 tw32(NVRAM_ADDR, phy_addr);
12750 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12751 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12753 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12756 /* Issue another write enable to start the write. */
12757 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12759 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12762 for (j = 0; j < pagesize; j += 4) {
12765 data = *((__be32 *) (tmp + j));
12767 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12769 tw32(NVRAM_ADDR, phy_addr + j);
12771 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12775 nvram_cmd |= NVRAM_CMD_FIRST;
12776 else if (j == (pagesize - 4))
12777 nvram_cmd |= NVRAM_CMD_LAST;
12779 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12786 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12787 tg3_nvram_exec_cmd(tp, nvram_cmd);
12794 /* offset and length are dword aligned */
12795 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12800 for (i = 0; i < len; i += 4, offset += 4) {
12801 u32 page_off, phy_addr, nvram_cmd;
12804 memcpy(&data, buf + i, 4);
12805 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12807 page_off = offset % tp->nvram_pagesize;
12809 phy_addr = tg3_nvram_phys_addr(tp, offset);
12811 tw32(NVRAM_ADDR, phy_addr);
12813 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12815 if (page_off == 0 || i == 0)
12816 nvram_cmd |= NVRAM_CMD_FIRST;
12817 if (page_off == (tp->nvram_pagesize - 4))
12818 nvram_cmd |= NVRAM_CMD_LAST;
12820 if (i == (len - 4))
12821 nvram_cmd |= NVRAM_CMD_LAST;
12823 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12824 !tg3_flag(tp, 5755_PLUS) &&
12825 (tp->nvram_jedecnum == JEDEC_ST) &&
12826 (nvram_cmd & NVRAM_CMD_FIRST)) {
12828 if ((ret = tg3_nvram_exec_cmd(tp,
12829 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12834 if (!tg3_flag(tp, FLASH)) {
12835 /* We always do complete word writes to eeprom. */
12836 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12839 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12845 /* offset and length are dword aligned */
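/* Top-level NVRAM write: temporarily lift the GPIO write protect when
 * necessary, then dispatch to the SEEPROM, buffered or unbuffered write
 * routine appropriate for the part.
 */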
12846 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12850 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12851 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12852 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12856 if (!tg3_flag(tp, NVRAM)) {
12857 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12861 ret = tg3_nvram_lock(tp);
12865 tg3_enable_nvram_access(tp);
12866 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12867 tw32(NVRAM_WRITE1, 0x406);
12869 grc_mode = tr32(GRC_MODE);
12870 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12872 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12873 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12876 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12880 grc_mode = tr32(GRC_MODE);
12881 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12883 tg3_disable_nvram_access(tp);
12884 tg3_nvram_unlock(tp);
12887 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12888 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
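/* Table mapping PCI subsystem IDs to PHY IDs for boards whose NVRAM does
 * not carry a usable configuration signature.
 */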
12895 struct subsys_tbl_ent {
12896 u16 subsys_vendor, subsys_devid;
12900 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12901 /* Broadcom boards. */
12902 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12904 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12906 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12908 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12909 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12910 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12911 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12912 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12913 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12914 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12915 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12916 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12917 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12918 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12919 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12920 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12921 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12922 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12923 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12926 { TG3PCI_SUBVENDOR_ID_3COM,
12927 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12928 { TG3PCI_SUBVENDOR_ID_3COM,
12929 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12930 { TG3PCI_SUBVENDOR_ID_3COM,
12931 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12932 { TG3PCI_SUBVENDOR_ID_3COM,
12933 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12934 { TG3PCI_SUBVENDOR_ID_3COM,
12935 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12938 { TG3PCI_SUBVENDOR_ID_DELL,
12939 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12940 { TG3PCI_SUBVENDOR_ID_DELL,
12941 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12942 { TG3PCI_SUBVENDOR_ID_DELL,
12943 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12944 { TG3PCI_SUBVENDOR_ID_DELL,
12945 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12947 /* Compaq boards. */
12948 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12949 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12950 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12951 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12952 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12953 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12954 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12955 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12956 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12957 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12960 { TG3PCI_SUBVENDOR_ID_IBM,
12961 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12964 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12968 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12969 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12970 tp->pdev->subsystem_vendor) &&
12971 (subsys_id_to_phy_id[i].subsys_devid ==
12972 tp->pdev->subsystem_device))
12973 return &subsys_id_to_phy_id[i];
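/* Read the manufacturing configuration from NIC SRAM (or from the VCPU
 * shadow registers on 5906) and translate it into PHY type, LED mode,
 * WOL, ASF/APE and related feature flags.
 */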
12978 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12982 tp->phy_id = TG3_PHY_ID_INVALID;
12983 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12985 /* Assume an onboard device that is WOL-capable by default. */
12986 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12987 tg3_flag_set(tp, WOL_CAP);
12989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12990 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12991 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12992 tg3_flag_set(tp, IS_NIC);
12994 val = tr32(VCPU_CFGSHDW);
12995 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12996 tg3_flag_set(tp, ASPM_WORKAROUND);
12997 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12998 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12999 tg3_flag_set(tp, WOL_ENABLE);
13000 device_set_wakeup_enable(&tp->pdev->dev, true);
13005 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13006 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13007 u32 nic_cfg, led_cfg;
13008 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13009 int eeprom_phy_serdes = 0;
13011 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13012 tp->nic_sram_data_cfg = nic_cfg;
13014 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13015 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13016 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13017 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13018 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13019 (ver > 0) && (ver < 0x100))
13020 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13023 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13025 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13026 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13027 eeprom_phy_serdes = 1;
13029 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13030 if (nic_phy_id != 0) {
13031 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13032 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13034 eeprom_phy_id = (id1 >> 16) << 10;
13035 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13036 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13040 tp->phy_id = eeprom_phy_id;
13041 if (eeprom_phy_serdes) {
13042 if (!tg3_flag(tp, 5705_PLUS))
13043 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13045 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13048 if (tg3_flag(tp, 5750_PLUS))
13049 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13050 SHASTA_EXT_LED_MODE_MASK);
13052 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13056 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13057 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13060 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13061 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13064 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13065 tp->led_ctrl = LED_CTRL_MODE_MAC;
13067 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13068 * read on some older 5700/5701 bootcode.
13070 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13072 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13074 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13078 case SHASTA_EXT_LED_SHARED:
13079 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13080 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13081 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13082 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13083 LED_CTRL_MODE_PHY_2);
13086 case SHASTA_EXT_LED_MAC:
13087 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13090 case SHASTA_EXT_LED_COMBO:
13091 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13092 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13093 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13094 LED_CTRL_MODE_PHY_2);
13099 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13101 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13102 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13104 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13105 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13107 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13108 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13109 if ((tp->pdev->subsystem_vendor ==
13110 PCI_VENDOR_ID_ARIMA) &&
13111 (tp->pdev->subsystem_device == 0x205a ||
13112 tp->pdev->subsystem_device == 0x2063))
13113 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13115 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13116 tg3_flag_set(tp, IS_NIC);
13119 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13120 tg3_flag_set(tp, ENABLE_ASF);
13121 if (tg3_flag(tp, 5750_PLUS))
13122 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13125 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13126 tg3_flag(tp, 5750_PLUS))
13127 tg3_flag_set(tp, ENABLE_APE);
13129 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13130 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13131 tg3_flag_clear(tp, WOL_CAP);
13133 if (tg3_flag(tp, WOL_CAP) &&
13134 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13135 tg3_flag_set(tp, WOL_ENABLE);
13136 device_set_wakeup_enable(&tp->pdev->dev, true);
13139 if (cfg2 & (1 << 17))
13140 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13142 /* serdes signal pre-emphasis in register 0x590 is set by the
13143  * bootcode if bit 18 is set */
13144 if (cfg2 & (1 << 18))
13145 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13147 if ((tg3_flag(tp, 57765_PLUS) ||
13148 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13149 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13150 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13151 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13153 if (tg3_flag(tp, PCI_EXPRESS) &&
13154 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13155 !tg3_flag(tp, 57765_PLUS)) {
13158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13159 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13160 tg3_flag_set(tp, ASPM_WORKAROUND);
13163 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13164 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13165 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13166 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13167 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13168 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13171 if (tg3_flag(tp, WOL_CAP))
13172 device_set_wakeup_enable(&tp->pdev->dev,
13173 tg3_flag(tp, WOL_ENABLE));
13175 device_set_wakeup_capable(&tp->pdev->dev, false);
13178 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13183 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13184 tw32(OTP_CTRL, cmd);
13186 /* Wait for up to 1 ms for command to execute. */
13187 for (i = 0; i < 100; i++) {
13188 val = tr32(OTP_STATUS);
13189 if (val & OTP_STATUS_CMD_DONE)
13194 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13197 /* Read the gphy configuration from the OTP region of the chip. The gphy
13198 * configuration is a 32-bit value that straddles the alignment boundary.
13199 * We do two 32-bit reads and then shift and merge the results.
13201 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13203 u32 bhalf_otp, thalf_otp;
13205 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13207 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13210 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13212 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13215 thalf_otp = tr32(OTP_READ_DATA);
13217 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13219 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13222 bhalf_otp = tr32(OTP_READ_DATA);
13224 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13227 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13229 u32 adv = ADVERTISED_Autoneg;
13231 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13232 adv |= ADVERTISED_1000baseT_Half |
13233 ADVERTISED_1000baseT_Full;
13235 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13236 adv |= ADVERTISED_100baseT_Half |
13237 ADVERTISED_100baseT_Full |
13238 ADVERTISED_10baseT_Half |
13239 ADVERTISED_10baseT_Full |
13242 adv |= ADVERTISED_FIBRE;
13244 tp->link_config.advertising = adv;
13245 tp->link_config.speed = SPEED_INVALID;
13246 tp->link_config.duplex = DUPLEX_INVALID;
13247 tp->link_config.autoneg = AUTONEG_ENABLE;
13248 tp->link_config.active_speed = SPEED_INVALID;
13249 tp->link_config.active_duplex = DUPLEX_INVALID;
13250 tp->link_config.orig_speed = SPEED_INVALID;
13251 tp->link_config.orig_duplex = DUPLEX_INVALID;
13252 tp->link_config.orig_autoneg = AUTONEG_INVALID;
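/* Identify the attached PHY: read the MII PHY ID registers when ASF/APE
 * firmware is not using the PHY, falling back to the ID discovered in the
 * EEPROM or to the subsystem-ID table, and finish basic PHY setup.
 */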
13255 static int __devinit tg3_phy_probe(struct tg3 *tp)
13257 u32 hw_phy_id_1, hw_phy_id_2;
13258 u32 hw_phy_id, hw_phy_id_masked;
13261 /* flow control autonegotiation is default behavior */
13262 tg3_flag_set(tp, PAUSE_AUTONEG);
13263 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13265 if (tg3_flag(tp, USE_PHYLIB))
13266 return tg3_phy_init(tp);
13268 /* Reading the PHY ID register can conflict with ASF
13269 * firmware access to the PHY hardware.
13272 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13273 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13275 /* Now read the physical PHY_ID from the chip and verify
13276 * that it is sane. If it doesn't look good, we fall back
13277 * to the hard-coded table based PHY_ID or, failing
13278 * that, to the value found in the eeprom area.
13280 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13281 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13283 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13284 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13285 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13287 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13290 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13291 tp->phy_id = hw_phy_id;
13292 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13293 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13295 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13297 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13298 /* Do nothing, phy ID already set up in
13299 * tg3_get_eeprom_hw_cfg().
13302 struct subsys_tbl_ent *p;
13304 /* No eeprom signature? Try the hardcoded
13305 * subsys device table.
13307 p = tg3_lookup_by_subsys(tp);
13311 tp->phy_id = p->phy_id;
13313 tp->phy_id == TG3_PHY_ID_BCM8002)
13314 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13318 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13319 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13321 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13322 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13323 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13324 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13325 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13327 tg3_phy_init_link_config(tp);
13329 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13330 !tg3_flag(tp, ENABLE_APE) &&
13331 !tg3_flag(tp, ENABLE_ASF)) {
13334 tg3_readphy(tp, MII_BMSR, &bmsr);
13335 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13336 (bmsr & BMSR_LSTATUS))
13337 goto skip_phy_reset;
13339 err = tg3_phy_reset(tp);
13343 tg3_phy_set_wirespeed(tp);
13345 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13346 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13347 tp->link_config.flowctrl);
13349 tg3_writephy(tp, MII_BMCR,
13350 BMCR_ANENABLE | BMCR_ANRESTART);
13355 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13356 err = tg3_init_5401phy_dsp(tp);
13360 err = tg3_init_5401phy_dsp(tp);
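/* Extract the board part number (and, on boards whose VPD manufacturer ID
 * is "1028", i.e. Dell, a firmware version prefix) from the PCI VPD block,
 * falling back to a per-device default string when no VPD is available.
 */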
13366 static void __devinit tg3_read_vpd(struct tg3 *tp)
13369 unsigned int block_end, rosize, len;
13373 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13377 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13379 goto out_not_found;
13381 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13382 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13383 i += PCI_VPD_LRDT_TAG_SIZE;
13385 if (block_end > vpdlen)
13386 goto out_not_found;
13388 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13389 PCI_VPD_RO_KEYWORD_MFR_ID);
13391 len = pci_vpd_info_field_size(&vpd_data[j]);
13393 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13394 if (j + len > block_end || len != 4 ||
13395 memcmp(&vpd_data[j], "1028", 4))
13398 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13399 PCI_VPD_RO_KEYWORD_VENDOR0);
13403 len = pci_vpd_info_field_size(&vpd_data[j]);
13405 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13406 if (j + len > block_end)
13409 memcpy(tp->fw_ver, &vpd_data[j], len);
13410 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13414 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13415 PCI_VPD_RO_KEYWORD_PARTNO);
13417 goto out_not_found;
13419 len = pci_vpd_info_field_size(&vpd_data[i]);
13421 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13422 if (len > TG3_BPN_SIZE ||
13423 (len + i) > vpdlen)
13424 goto out_not_found;
13426 memcpy(tp->board_part_number, &vpd_data[i], len);
13430 if (tp->board_part_number[0])
13434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13435 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13436 strcpy(tp->board_part_number, "BCM5717");
13437 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13438 strcpy(tp->board_part_number, "BCM5718");
13441 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13442 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13443 strcpy(tp->board_part_number, "BCM57780");
13444 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13445 strcpy(tp->board_part_number, "BCM57760");
13446 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13447 strcpy(tp->board_part_number, "BCM57790");
13448 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13449 strcpy(tp->board_part_number, "BCM57788");
13452 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13453 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13454 strcpy(tp->board_part_number, "BCM57761");
13455 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13456 strcpy(tp->board_part_number, "BCM57765");
13457 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13458 strcpy(tp->board_part_number, "BCM57781");
13459 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13460 strcpy(tp->board_part_number, "BCM57785");
13461 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13462 strcpy(tp->board_part_number, "BCM57791");
13463 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13464 strcpy(tp->board_part_number, "BCM57795");
13467 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13468 strcpy(tp->board_part_number, "BCM95906");
13471 strcpy(tp->board_part_number, "none");
13475 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13479 if (tg3_nvram_read(tp, offset, &val) ||
13480 (val & 0xfc000000) != 0x0c000000 ||
13481 tg3_nvram_read(tp, offset + 4, &val) ||
13488 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13490 u32 val, offset, start, ver_offset;
13492 bool newver = false;
13494 if (tg3_nvram_read(tp, 0xc, &offset) ||
13495 tg3_nvram_read(tp, 0x4, &start))
13498 offset = tg3_nvram_logical_addr(tp, offset);
13500 if (tg3_nvram_read(tp, offset, &val))
13503 if ((val & 0xfc000000) == 0x0c000000) {
13504 if (tg3_nvram_read(tp, offset + 4, &val))
13511 dst_off = strlen(tp->fw_ver);
13514 if (TG3_VER_SIZE - dst_off < 16 ||
13515 tg3_nvram_read(tp, offset + 8, &ver_offset))
13518 offset = offset + ver_offset - start;
13519 for (i = 0; i < 16; i += 4) {
13521 if (tg3_nvram_read_be32(tp, offset + i, &v))
13524 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13529 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13532 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13533 TG3_NVM_BCVER_MAJSFT;
13534 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13535 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13536 "v%d.%02d", major, minor);
13540 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13542 u32 val, major, minor;
13544 /* Use native endian representation */
13545 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13548 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13549 TG3_NVM_HWSB_CFG1_MAJSFT;
13550 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13551 TG3_NVM_HWSB_CFG1_MINSFT;
13553 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13556 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13558 u32 offset, major, minor, build;
13560 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13562 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13565 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13566 case TG3_EEPROM_SB_REVISION_0:
13567 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13569 case TG3_EEPROM_SB_REVISION_2:
13570 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13572 case TG3_EEPROM_SB_REVISION_3:
13573 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13575 case TG3_EEPROM_SB_REVISION_4:
13576 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13578 case TG3_EEPROM_SB_REVISION_5:
13579 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13581 case TG3_EEPROM_SB_REVISION_6:
13582 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13588 if (tg3_nvram_read(tp, offset, &val))
13591 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13592 TG3_EEPROM_SB_EDH_BLD_SHFT;
13593 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13594 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13595 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13597 if (minor > 99 || build > 26)
13600 offset = strlen(tp->fw_ver);
13601 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13602 " v%d.%02d", major, minor);
13605 offset = strlen(tp->fw_ver);
13606 if (offset < TG3_VER_SIZE - 1)
13607 tp->fw_ver[offset] = 'a' + build - 1;
13611 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13613 u32 val, offset, start;
13616 for (offset = TG3_NVM_DIR_START;
13617 offset < TG3_NVM_DIR_END;
13618 offset += TG3_NVM_DIRENT_SIZE) {
13619 if (tg3_nvram_read(tp, offset, &val))
13622 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13626 if (offset == TG3_NVM_DIR_END)
13629 if (!tg3_flag(tp, 5705_PLUS))
13630 start = 0x08000000;
13631 else if (tg3_nvram_read(tp, offset - 4, &start))
13634 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13635 !tg3_fw_img_is_valid(tp, offset) ||
13636 tg3_nvram_read(tp, offset + 8, &val))
13639 offset += val - start;
13641 vlen = strlen(tp->fw_ver);
13643 tp->fw_ver[vlen++] = ',';
13644 tp->fw_ver[vlen++] = ' ';
13646 for (i = 0; i < 4; i++) {
13648 if (tg3_nvram_read_be32(tp, offset, &v))
13651 offset += sizeof(v);
13653 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13654 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13658 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13663 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13669 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13672 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13673 if (apedata != APE_SEG_SIG_MAGIC)
13676 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13677 if (!(apedata & APE_FW_STATUS_READY))
13680 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13682 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13683 tg3_flag_set(tp, APE_HAS_NCSI);
13689 vlen = strlen(tp->fw_ver);
13691 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13693 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13694 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13695 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13696 (apedata & APE_FW_VERSION_BLDMSK));
13699 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13702 bool vpd_vers = false;
13704 if (tp->fw_ver[0] != 0)
13707 if (tg3_flag(tp, NO_NVRAM)) {
13708 strcat(tp->fw_ver, "sb");
13712 if (tg3_nvram_read(tp, 0, &val))
13715 if (val == TG3_EEPROM_MAGIC)
13716 tg3_read_bc_ver(tp);
13717 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13718 tg3_read_sb_ver(tp, val);
13719 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13720 tg3_read_hwsb_ver(tp);
13727 if (tg3_flag(tp, ENABLE_APE)) {
13728 if (tg3_flag(tp, ENABLE_ASF))
13729 tg3_read_dash_ver(tp);
13730 } else if (tg3_flag(tp, ENABLE_ASF)) {
13731 tg3_read_mgmtfw_ver(tp);
13735 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
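/* To summarize: tp->fw_ver starts with a base version taken from the
 * bootcode, self-boot or hardware self-boot image (selected by the NVRAM
 * magic), optionally followed by the management firmware version (ASF
 * without APE) or the APE/DASH firmware version, and is always
 * NUL-terminated at TG3_VER_SIZE - 1.
 */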
13738 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13740 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13742 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13743 return TG3_RX_RET_MAX_SIZE_5717;
13744 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13745 return TG3_RX_RET_MAX_SIZE_5700;
13747 return TG3_RX_RET_MAX_SIZE_5705;
13750 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13751 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13752 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13753 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13757 static int __devinit tg3_get_invariants(struct tg3 *tp)
13760 u32 pci_state_reg, grc_misc_cfg;
13765 /* Force memory write invalidate off. If we leave it on,
13766 * then on 5700_BX chips we have to enable a workaround.
13767 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13768 * to match the cacheline size. The Broadcom driver has this
13769 * workaround but turns MWI off all the time, so it never uses
13770 * it. This seems to suggest that the workaround is insufficient.
13772 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13773 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13774 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13776 /* Important! -- Make sure register accesses are byteswapped
13777 * correctly. Also, for those chips that require it, make
13778 * sure that indirect register accesses are enabled before
13779 * the first operation.
13781 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13783 tp->misc_host_ctrl |= (misc_ctrl_reg &
13784 MISC_HOST_CTRL_CHIPREV);
13785 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13786 tp->misc_host_ctrl);
13788 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13789 MISC_HOST_CTRL_CHIPREV_SHIFT);
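/* pci_chip_rev_id packs the ASIC revision in its upper bits with the
 * metal/chip revision below it; the GET_ASIC_REV() and GET_CHIP_REV()
 * helpers from tg3.h extract those fields for the comparisons that follow.
 */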
13790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13791 u32 prod_id_asic_rev;
13793 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13794 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13795 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13797 pci_read_config_dword(tp->pdev,
13798 TG3PCI_GEN2_PRODID_ASICREV,
13799 &prod_id_asic_rev);
13800 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13801 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13802 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13803 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13804 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13805 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13806 pci_read_config_dword(tp->pdev,
13807 TG3PCI_GEN15_PRODID_ASICREV,
13808 &prod_id_asic_rev);
13810 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13811 &prod_id_asic_rev);
13813 tp->pci_chip_rev_id = prod_id_asic_rev;
13816 /* Wrong chip ID in 5752 A0. This code can be removed later
13817 * as A0 is not in production.
13819 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13820 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13822 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13823 * we need to disable memory and use config. cycles
13824 * only to access all registers. The 5702/03 chips
13825 * can mistakenly decode the special cycles from the
13826 * ICH chipsets as memory write cycles, causing corruption
13827 * of register and memory space. Only certain ICH bridges
13828 * will drive special cycles with non-zero data during the
13829 * address phase which can fall within the 5703's address
13830 * range. This is not an ICH bug as the PCI spec allows
13831 * non-zero address during special cycles. However, only
13832 * these ICH bridges are known to drive non-zero addresses
13833 * during special cycles.
13835 * Since special cycles do not cross PCI bridges, we only
13836 * enable this workaround if the 5703 is on the secondary
13837 * bus of these ICH bridges.
13839 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13840 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13841 static struct tg3_dev_id {
13845 } ich_chipsets[] = {
13846 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13848 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13850 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13852 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13856 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13857 struct pci_dev *bridge = NULL;
13859 while (pci_id->vendor != 0) {
13860 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13866 if (pci_id->rev != PCI_ANY_ID) {
13867 if (bridge->revision > pci_id->rev)
13870 if (bridge->subordinate &&
13871 (bridge->subordinate->number ==
13872 tp->pdev->bus->number)) {
13873 tg3_flag_set(tp, ICH_WORKAROUND);
13874 pci_dev_put(bridge);
13880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13881 static struct tg3_dev_id {
13884 } bridge_chipsets[] = {
13885 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13886 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13889 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13890 struct pci_dev *bridge = NULL;
13892 while (pci_id->vendor != 0) {
13893 bridge = pci_get_device(pci_id->vendor,
13900 if (bridge->subordinate &&
13901 (bridge->subordinate->number <=
13902 tp->pdev->bus->number) &&
13903 (bridge->subordinate->subordinate >=
13904 tp->pdev->bus->number)) {
13905 tg3_flag_set(tp, 5701_DMA_BUG);
13906 pci_dev_put(bridge);
13912 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13913 * DMA addresses > 40-bit. This bridge may have other additional
13914 * 57xx devices behind it in some 4-port NIC designs for example.
13915 * Any tg3 device found behind the bridge will also need the 40-bit
13918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13920 tg3_flag_set(tp, 5780_CLASS);
13921 tg3_flag_set(tp, 40BIT_DMA_BUG);
13922 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13924 struct pci_dev *bridge = NULL;
13927 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13928 PCI_DEVICE_ID_SERVERWORKS_EPB,
13930 if (bridge && bridge->subordinate &&
13931 (bridge->subordinate->number <=
13932 tp->pdev->bus->number) &&
13933 (bridge->subordinate->subordinate >=
13934 tp->pdev->bus->number)) {
13935 tg3_flag_set(tp, 40BIT_DMA_BUG);
13936 pci_dev_put(bridge);
13942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13944 tp->pdev_peer = tg3_find_peer(tp);
13946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13949 tg3_flag_set(tp, 5717_PLUS);
13951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13952 tg3_flag(tp, 5717_PLUS))
13953 tg3_flag_set(tp, 57765_PLUS);
13955 /* Intentionally exclude ASIC_REV_5906 */
13956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13962 tg3_flag(tp, 57765_PLUS))
13963 tg3_flag_set(tp, 5755_PLUS);
13965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13968 tg3_flag(tp, 5755_PLUS) ||
13969 tg3_flag(tp, 5780_CLASS))
13970 tg3_flag_set(tp, 5750_PLUS);
13972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13973 tg3_flag(tp, 5750_PLUS))
13974 tg3_flag_set(tp, 5705_PLUS);
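/* The generation flags set above nest: 5717_PLUS implies 57765_PLUS, which
 * implies 5755_PLUS, which (along with 5780_CLASS) implies 5750_PLUS, which
 * in turn implies 5705_PLUS.  Later code can therefore test the broadest
 * applicable generation with a single flag.
 */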
13976 /* Determine TSO capabilities */
13977 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13978 ; /* Do nothing. HW bug. */
13979 else if (tg3_flag(tp, 57765_PLUS))
13980 tg3_flag_set(tp, HW_TSO_3);
13981 else if (tg3_flag(tp, 5755_PLUS) ||
13982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13983 tg3_flag_set(tp, HW_TSO_2);
13984 else if (tg3_flag(tp, 5750_PLUS)) {
13985 tg3_flag_set(tp, HW_TSO_1);
13986 tg3_flag_set(tp, TSO_BUG);
13987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13988 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13989 tg3_flag_clear(tp, TSO_BUG);
13990 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13991 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13992 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13993 tg3_flag_set(tp, TSO_BUG);
13994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13995 tp->fw_needed = FIRMWARE_TG3TSO5;
13997 tp->fw_needed = FIRMWARE_TG3TSO;
14000 /* Selectively allow TSO based on operating conditions */
14001 if (tg3_flag(tp, HW_TSO_1) ||
14002 tg3_flag(tp, HW_TSO_2) ||
14003 tg3_flag(tp, HW_TSO_3) ||
14005 /* For firmware TSO, assume ASF is disabled.
14006 * We'll disable TSO later if we discover ASF
14007 * is enabled in tg3_get_eeprom_hw_cfg().
14009 tg3_flag_set(tp, TSO_CAPABLE);
14011 tg3_flag_clear(tp, TSO_CAPABLE);
14012 tg3_flag_clear(tp, TSO_BUG);
14013 tp->fw_needed = NULL;
14016 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14017 tp->fw_needed = FIRMWARE_TG3;
14021 if (tg3_flag(tp, 5750_PLUS)) {
14022 tg3_flag_set(tp, SUPPORT_MSI);
14023 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14024 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14025 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14026 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14027 tp->pdev_peer == tp->pdev))
14028 tg3_flag_clear(tp, SUPPORT_MSI);
14030 if (tg3_flag(tp, 5755_PLUS) ||
14031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14032 tg3_flag_set(tp, 1SHOT_MSI);
14035 if (tg3_flag(tp, 57765_PLUS)) {
14036 tg3_flag_set(tp, SUPPORT_MSIX);
14037 tp->irq_max = TG3_IRQ_MAX_VECS;
14041 if (tg3_flag(tp, 5755_PLUS))
14042 tg3_flag_set(tp, SHORT_DMA_BUG);
14044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14045 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14050 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14052 if (tg3_flag(tp, 57765_PLUS) &&
14053 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14054 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14056 if (!tg3_flag(tp, 5705_PLUS) ||
14057 tg3_flag(tp, 5780_CLASS) ||
14058 tg3_flag(tp, USE_JUMBO_BDFLAG))
14059 tg3_flag_set(tp, JUMBO_CAPABLE);
14061 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14064 if (pci_is_pcie(tp->pdev)) {
14067 tg3_flag_set(tp, PCI_EXPRESS);
14069 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14070 int readrq = pcie_get_readrq(tp->pdev);
14072 pcie_set_readrq(tp->pdev, 2048);
14075 pci_read_config_word(tp->pdev,
14076 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14078 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14079 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14081 tg3_flag_clear(tp, HW_TSO_2);
14082 tg3_flag_clear(tp, TSO_CAPABLE);
14084 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14086 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14087 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14088 tg3_flag_set(tp, CLKREQ_BUG);
14089 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14090 tg3_flag_set(tp, L1PLLPD_EN);
14092 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14093 /* BCM5785 devices are effectively PCIe devices, and should
14094 * follow PCIe codepaths, but do not have a PCIe capabilities
14097 tg3_flag_set(tp, PCI_EXPRESS);
14098 } else if (!tg3_flag(tp, 5705_PLUS) ||
14099 tg3_flag(tp, 5780_CLASS)) {
14100 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14101 if (!tp->pcix_cap) {
14102 dev_err(&tp->pdev->dev,
14103 "Cannot find PCI-X capability, aborting\n");
14107 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14108 tg3_flag_set(tp, PCIX_MODE);
14111 /* If we have an AMD 762 or VIA K8T800 chipset, write
14112 * reordering to the mailbox registers done by the host
14113 * controller can cause major troubles. We read back from
14114 * every mailbox register write to force the writes to be
14115 * posted to the chip in order.
14117 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14118 !tg3_flag(tp, PCI_EXPRESS))
14119 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14121 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14122 &tp->pci_cacheline_sz);
14123 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14124 &tp->pci_lat_timer);
14125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14126 tp->pci_lat_timer < 64) {
14127 tp->pci_lat_timer = 64;
14128 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14129 tp->pci_lat_timer);
14132 /* Important! -- It is critical that the PCI-X hw workaround
14133 * situation is decided before the first MMIO register access.
14135 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14136 /* 5700 BX chips need to have their TX producer index
14137 * mailboxes written twice to workaround a bug.
14139 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14141 /* If we are in PCI-X mode, enable register write workaround.
14143 * The workaround is to use indirect register accesses
14144 * for all chip writes not to mailbox registers.
14146 if (tg3_flag(tp, PCIX_MODE)) {
14149 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14151 /* The chip can have its power management PCI config
14152 * space registers clobbered due to this bug.
14153 * So explicitly force the chip into D0 here.
14155 pci_read_config_dword(tp->pdev,
14156 tp->pm_cap + PCI_PM_CTRL,
14158 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14159 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14160 pci_write_config_dword(tp->pdev,
14161 tp->pm_cap + PCI_PM_CTRL,
14164 /* Also, force SERR#/PERR# in PCI command. */
14165 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14166 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14167 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14171 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14172 tg3_flag_set(tp, PCI_HIGH_SPEED);
14173 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14174 tg3_flag_set(tp, PCI_32BIT);
14176 /* Chip-specific fixup from Broadcom driver */
14177 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14178 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14179 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14180 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14183 /* Default fast path register access methods */
14184 tp->read32 = tg3_read32;
14185 tp->write32 = tg3_write32;
14186 tp->read32_mbox = tg3_read32;
14187 tp->write32_mbox = tg3_write32;
14188 tp->write32_tx_mbox = tg3_write32;
14189 tp->write32_rx_mbox = tg3_write32;
14191 /* Various workaround register access methods */
14192 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14193 tp->write32 = tg3_write_indirect_reg32;
14194 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14195 (tg3_flag(tp, PCI_EXPRESS) &&
14196 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14198 * Back-to-back register writes can cause problems on these
14199 * chips; the workaround is to read back all reg writes
14200 * except those to mailbox regs.
14202 * See tg3_write_indirect_reg32().
14204 tp->write32 = tg3_write_flush_reg32;
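/* For reference, a flush-style write helper of the kind installed here
 * behaves roughly as follows (sketch only; the real helper is defined
 * earlier in this file):
 *
 *	writel(val, tp->regs + off);
 *	readl(tp->regs + off);		// dummy read forces the posted write out
 *
 * The read-back guarantees the write has reached the device before any
 * later register or mailbox access can overtake it.
 */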
14207 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14208 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14209 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14210 tp->write32_rx_mbox = tg3_write_flush_reg32;
14213 if (tg3_flag(tp, ICH_WORKAROUND)) {
14214 tp->read32 = tg3_read_indirect_reg32;
14215 tp->write32 = tg3_write_indirect_reg32;
14216 tp->read32_mbox = tg3_read_indirect_mbox;
14217 tp->write32_mbox = tg3_write_indirect_mbox;
14218 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14219 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14224 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14225 pci_cmd &= ~PCI_COMMAND_MEMORY;
14226 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14229 tp->read32_mbox = tg3_read32_mbox_5906;
14230 tp->write32_mbox = tg3_write32_mbox_5906;
14231 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14232 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14235 if (tp->write32 == tg3_write_indirect_reg32 ||
14236 (tg3_flag(tp, PCIX_MODE) &&
14237 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14238 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14239 tg3_flag_set(tp, SRAM_USE_CONFIG);
14241 /* The memory arbiter has to be enabled in order for SRAM accesses
14242 * to succeed. Normally on powerup the tg3 chip firmware will make
14243 * sure it is enabled, but other entities such as system netboot
14244 * code might disable it.
14246 val = tr32(MEMARB_MODE);
14247 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14249 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14251 tg3_flag(tp, 5780_CLASS)) {
14252 if (tg3_flag(tp, PCIX_MODE)) {
14253 pci_read_config_dword(tp->pdev,
14254 tp->pcix_cap + PCI_X_STATUS,
14256 tp->pci_fn = val & 0x7;
14258 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14259 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14260 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14261 NIC_SRAM_CPMUSTAT_SIG) {
14262 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14263 tp->pci_fn = tp->pci_fn ? 1 : 0;
14265 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14267 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14268 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14269 NIC_SRAM_CPMUSTAT_SIG) {
14270 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14271 TG3_CPMU_STATUS_FSHFT_5719;
14275 /* Get eeprom hw config before calling tg3_set_power_state().
14276 * In particular, the TG3_FLAG_IS_NIC flag must be
14277 * determined before calling tg3_set_power_state() so that
14278 * we know whether or not to switch out of Vaux power.
14279 * When the flag is set, it means that GPIO1 is used for eeprom
14280 * write protect and also implies that it is a LOM where GPIOs
14281 * are not used to switch power.
14283 tg3_get_eeprom_hw_cfg(tp);
14285 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14286 tg3_flag_clear(tp, TSO_CAPABLE);
14287 tg3_flag_clear(tp, TSO_BUG);
14288 tp->fw_needed = NULL;
14291 if (tg3_flag(tp, ENABLE_APE)) {
14292 /* Allow reads and writes to the
14293 * APE register and memory space.
14295 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14296 PCISTATE_ALLOW_APE_SHMEM_WR |
14297 PCISTATE_ALLOW_APE_PSPACE_WR;
14298 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14301 tg3_ape_lock_init(tp);
14304 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14308 tg3_flag(tp, 57765_PLUS))
14309 tg3_flag_set(tp, CPMU_PRESENT);
14311 /* Set up tp->grc_local_ctrl before calling
14312 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14313 * will bring 5700's external PHY out of reset.
14314 * It is also used as eeprom write protect on LOMs.
14316 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14318 tg3_flag(tp, EEPROM_WRITE_PROT))
14319 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14320 GRC_LCLCTRL_GPIO_OUTPUT1);
14321 /* Unused GPIO3 must be driven as output on 5752 because there
14322 * are no pull-up resistors on unused GPIO pins.
14324 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14325 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14330 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14332 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14333 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14334 /* Turn off the debug UART. */
14335 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14336 if (tg3_flag(tp, IS_NIC))
14337 /* Keep VMain power. */
14338 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14339 GRC_LCLCTRL_GPIO_OUTPUT0;
14342 /* Switch out of Vaux if it is a NIC */
14343 tg3_pwrsrc_switch_to_vmain(tp);
14345 /* Derive initial jumbo mode from MTU assigned in
14346 * ether_setup() via the alloc_etherdev() call
14348 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14349 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14351 /* Determine WakeOnLan speed to use. */
14352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14353 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14354 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14355 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14356 tg3_flag_clear(tp, WOL_SPEED_100MB);
14358 tg3_flag_set(tp, WOL_SPEED_100MB);
14361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14362 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14364 /* A few boards don't want Ethernet@WireSpeed phy feature */
14365 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14366 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14367 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14368 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14369 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14370 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14371 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14373 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14374 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14375 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14376 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14377 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14379 if (tg3_flag(tp, 5705_PLUS) &&
14380 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14381 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14382 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14383 !tg3_flag(tp, 57765_PLUS)) {
14384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14386 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14388 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14389 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14390 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14391 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14392 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14394 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14398 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14399 tp->phy_otp = tg3_read_otp_phycfg(tp);
14400 if (tp->phy_otp == 0)
14401 tp->phy_otp = TG3_OTP_DEFAULT;
14404 if (tg3_flag(tp, CPMU_PRESENT))
14405 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14407 tp->mi_mode = MAC_MI_MODE_BASE;
14409 tp->coalesce_mode = 0;
14410 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14411 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14412 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14414 /* Set these bits to enable statistics workaround. */
14415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14416 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14417 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14418 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14419 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14423 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14424 tg3_flag_set(tp, USE_PHYLIB);
14426 err = tg3_mdio_init(tp);
14430 /* Initialize data/descriptor byte/word swapping. */
14431 val = tr32(GRC_MODE);
14432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14433 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14434 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14435 GRC_MODE_B2HRX_ENABLE |
14436 GRC_MODE_HTX2B_ENABLE |
14437 GRC_MODE_HOST_STACKUP);
14439 val &= GRC_MODE_HOST_STACKUP;
14441 tw32(GRC_MODE, val | tp->grc_mode);
14443 tg3_switch_clocks(tp);
14445 /* Clear this out for sanity. */
14446 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14448 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14450 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14451 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14452 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14454 if (chiprevid == CHIPREV_ID_5701_A0 ||
14455 chiprevid == CHIPREV_ID_5701_B0 ||
14456 chiprevid == CHIPREV_ID_5701_B2 ||
14457 chiprevid == CHIPREV_ID_5701_B5) {
14458 void __iomem *sram_base;
14460 /* Write some dummy words into the SRAM status block
14461 * area, see if it reads back correctly. If the return
14462 * value is bad, force enable the PCIX workaround.
14464 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14466 writel(0x00000000, sram_base);
14467 writel(0x00000000, sram_base + 4);
14468 writel(0xffffffff, sram_base + 4);
14469 if (readl(sram_base) != 0x00000000)
14470 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
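/* The test above writes zero to two adjacent status-block words, then
 * overwrites the second word with 0xffffffff; if the first word no longer
 * reads back as zero, the write was misdirected, which is taken as evidence
 * of the PCI-X target hardware bug and the workaround is force-enabled.
 */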
14475 tg3_nvram_init(tp);
14477 grc_misc_cfg = tr32(GRC_MISC_CFG);
14478 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14481 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14482 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14483 tg3_flag_set(tp, IS_5788);
14485 if (!tg3_flag(tp, IS_5788) &&
14486 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14487 tg3_flag_set(tp, TAGGED_STATUS);
14488 if (tg3_flag(tp, TAGGED_STATUS)) {
14489 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14490 HOSTCC_MODE_CLRTICK_TXBD);
14492 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14493 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14494 tp->misc_host_ctrl);
14497 /* Preserve the APE MAC_MODE bits */
14498 if (tg3_flag(tp, ENABLE_APE))
14499 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14503 /* these are limited to 10/100 only */
14504 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14505 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14506 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14507 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14508 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14509 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14510 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14511 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14512 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14513 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14514 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14517 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14518 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14519 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14521 err = tg3_phy_probe(tp);
14523 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14524 /* ... but do not return immediately ... */
14529 tg3_read_fw_ver(tp);
14531 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14532 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14534 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14535 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14537 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14540 /* 5700 {AX,BX} chips have a broken status block link
14541 * change bit implementation, so we must use the
14542 * status register in those cases.
14544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14545 tg3_flag_set(tp, USE_LINKCHG_REG);
14547 tg3_flag_clear(tp, USE_LINKCHG_REG);
14549 /* The led_ctrl is set during tg3_phy_probe; here we might
14550 * have to force the link status polling mechanism based
14551 * upon subsystem IDs.
14553 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14555 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14556 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14557 tg3_flag_set(tp, USE_LINKCHG_REG);
14560 /* For all SERDES we poll the MAC status register. */
14561 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14562 tg3_flag_set(tp, POLL_SERDES);
14564 tg3_flag_clear(tp, POLL_SERDES);
14566 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14567 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14569 tg3_flag(tp, PCIX_MODE)) {
14570 tp->rx_offset = NET_SKB_PAD;
14571 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14572 tp->rx_copy_thresh = ~(u16)0;
14576 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14577 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14578 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14580 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14582 /* Increment the rx prod index on the rx std ring by at most
14583 * 8 for these chips to work around hw errata.
14585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14588 tp->rx_std_max_post = 8;
14590 if (tg3_flag(tp, ASPM_WORKAROUND))
14591 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14592 PCIE_PWR_MGMT_L1_THRESH_MSK;
14597 #ifdef CONFIG_SPARC
14598 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14600 struct net_device *dev = tp->dev;
14601 struct pci_dev *pdev = tp->pdev;
14602 struct device_node *dp = pci_device_to_OF_node(pdev);
14603 const unsigned char *addr;
14606 addr = of_get_property(dp, "local-mac-address", &len);
14607 if (addr && len == 6) {
14608 memcpy(dev->dev_addr, addr, 6);
14609 memcpy(dev->perm_addr, dev->dev_addr, 6);
14615 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14617 struct net_device *dev = tp->dev;
14619 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14620 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14625 static int __devinit tg3_get_device_address(struct tg3 *tp)
14627 struct net_device *dev = tp->dev;
14628 u32 hi, lo, mac_offset;
14631 #ifdef CONFIG_SPARC
14632 if (!tg3_get_macaddr_sparc(tp))
14637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14638 tg3_flag(tp, 5780_CLASS)) {
14639 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14641 if (tg3_nvram_lock(tp))
14642 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14644 tg3_nvram_unlock(tp);
14645 } else if (tg3_flag(tp, 5717_PLUS)) {
14646 if (tp->pci_fn & 1)
14648 if (tp->pci_fn > 1)
14649 mac_offset += 0x18c;
14650 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14653 /* First try to get it from MAC address mailbox. */
14654 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14655 if ((hi >> 16) == 0x484b) {
14656 dev->dev_addr[0] = (hi >> 8) & 0xff;
14657 dev->dev_addr[1] = (hi >> 0) & 0xff;
14659 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14660 dev->dev_addr[2] = (lo >> 24) & 0xff;
14661 dev->dev_addr[3] = (lo >> 16) & 0xff;
14662 dev->dev_addr[4] = (lo >> 8) & 0xff;
14663 dev->dev_addr[5] = (lo >> 0) & 0xff;
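/* The 0x484b checked above is ASCII 'H','K', a signature presumably left by
 * the bootcode along with a valid address: bytes 0-1 of the MAC live in the
 * low half of the HIGH mailbox word and bytes 2-5 in the LOW word, most
 * significant byte first.
 */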
14665 /* Some old bootcode may report a 0 MAC address in SRAM */
14666 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14669 /* Next, try NVRAM. */
14670 if (!tg3_flag(tp, NO_NVRAM) &&
14671 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14672 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14673 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14674 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14676 /* Finally just fetch it out of the MAC control regs. */
14678 hi = tr32(MAC_ADDR_0_HIGH);
14679 lo = tr32(MAC_ADDR_0_LOW);
14681 dev->dev_addr[5] = lo & 0xff;
14682 dev->dev_addr[4] = (lo >> 8) & 0xff;
14683 dev->dev_addr[3] = (lo >> 16) & 0xff;
14684 dev->dev_addr[2] = (lo >> 24) & 0xff;
14685 dev->dev_addr[1] = hi & 0xff;
14686 dev->dev_addr[0] = (hi >> 8) & 0xff;
14690 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14691 #ifdef CONFIG_SPARC
14692 if (!tg3_get_default_macaddr_sparc(tp))
14697 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14701 #define BOUNDARY_SINGLE_CACHELINE 1
14702 #define BOUNDARY_MULTI_CACHELINE 2
14704 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14706 int cacheline_size;
14710 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14712 cacheline_size = 1024;
14714 cacheline_size = (int) byte * 4;
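/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the multiply by
 * four to get bytes; a value of zero (not configured) falls back to the
 * conservative 1024-byte assumption above.
 */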
14716 /* On 5703 and later chips, the boundary bits have no
14719 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14720 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14721 !tg3_flag(tp, PCI_EXPRESS))
14724 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14725 goal = BOUNDARY_MULTI_CACHELINE;
14727 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14728 goal = BOUNDARY_SINGLE_CACHELINE;
14734 if (tg3_flag(tp, 57765_PLUS)) {
14735 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14742 /* PCI controllers on most RISC systems tend to disconnect
14743 * when a device tries to burst across a cache-line boundary.
14744 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14746 * Unfortunately, for PCI-E there are only limited
14747 * write-side controls for this, and thus for reads
14748 * we will still get the disconnects. We'll also waste
14749 * these PCI cycles for both read and write for chips
14750 * other than 5700 and 5701 which do not implement the
14753 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14754 switch (cacheline_size) {
14759 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14760 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14761 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14763 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14764 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14769 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14770 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14774 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14775 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14778 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14779 switch (cacheline_size) {
14783 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14784 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14785 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14791 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14792 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14796 switch (cacheline_size) {
14798 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14799 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14800 DMA_RWCTRL_WRITE_BNDRY_16);
14805 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14806 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14807 DMA_RWCTRL_WRITE_BNDRY_32);
14812 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14813 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14814 DMA_RWCTRL_WRITE_BNDRY_64);
14819 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14820 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14821 DMA_RWCTRL_WRITE_BNDRY_128);
14826 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14827 DMA_RWCTRL_WRITE_BNDRY_256);
14830 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14831 DMA_RWCTRL_WRITE_BNDRY_512);
14835 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14836 DMA_RWCTRL_WRITE_BNDRY_1024);
14845 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14847 struct tg3_internal_buffer_desc test_desc;
14848 u32 sram_dma_descs;
14851 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14853 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14854 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14855 tw32(RDMAC_STATUS, 0);
14856 tw32(WDMAC_STATUS, 0);
14858 tw32(BUFMGR_MODE, 0);
14859 tw32(FTQ_RESET, 0);
14861 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14862 test_desc.addr_lo = buf_dma & 0xffffffff;
14863 test_desc.nic_mbuf = 0x00002100;
14864 test_desc.len = size;
14867 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14868 * the *second* time the tg3 driver was getting loaded after an
14871 * Broadcom tells me:
14872 * ...the DMA engine is connected to the GRC block and a DMA
14873 * reset may affect the GRC block in some unpredictable way...
14874 * The behavior of resets to individual blocks has not been tested.
14876 * Broadcom noted the GRC reset will also reset all sub-components.
14879 test_desc.cqid_sqid = (13 << 8) | 2;
14881 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14884 test_desc.cqid_sqid = (16 << 8) | 7;
14886 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14889 test_desc.flags = 0x00000005;
14891 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14894 val = *(((u32 *)&test_desc) + i);
14895 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14896 sram_dma_descs + (i * sizeof(u32)));
14897 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14899 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
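/* The descriptor is written into NIC SRAM indirectly, one 32-bit word at a
 * time, through the TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA config
 * space window; the window base is then restored to zero so later users of
 * the memory window start from a known state.
 */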
14902 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14904 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14907 for (i = 0; i < 40; i++) {
14911 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14913 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14914 if ((val & 0xffff) == sram_dma_descs) {
14925 #define TEST_BUFFER_SIZE 0x2000
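/* tg3_dma_wait_state_chipsets lists host bridges known to need the
 * conservative 16-byte DMA write boundary even when the test below passes.
 * tg3_test_dma() itself DMAs a test pattern into NIC SRAM at offset 0x2100,
 * verifies it there with tg3_read_mem(), DMAs it back and compares; a
 * mismatch on the read-back likewise forces the 16-byte write boundary.
 */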
14927 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14928 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14932 static int __devinit tg3_test_dma(struct tg3 *tp)
14934 dma_addr_t buf_dma;
14935 u32 *buf, saved_dma_rwctrl;
14938 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14939 &buf_dma, GFP_KERNEL);
14945 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14946 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14948 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14950 if (tg3_flag(tp, 57765_PLUS))
14953 if (tg3_flag(tp, PCI_EXPRESS)) {
14954 /* DMA read watermark not used on PCIE */
14955 tp->dma_rwctrl |= 0x00180000;
14956 } else if (!tg3_flag(tp, PCIX_MODE)) {
14957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14959 tp->dma_rwctrl |= 0x003f0000;
14961 tp->dma_rwctrl |= 0x003f000f;
14963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14965 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14966 u32 read_water = 0x7;
14968 /* If the 5704 is behind the EPB bridge, we can
14969 * do the less restrictive ONE_DMA workaround for
14970 * better performance.
14972 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14974 tp->dma_rwctrl |= 0x8000;
14975 else if (ccval == 0x6 || ccval == 0x7)
14976 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14978 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14980 /* Set bit 23 to enable PCIX hw bug fix */
14982 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14983 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14985 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14986 /* 5780 always in PCIX mode */
14987 tp->dma_rwctrl |= 0x00144000;
14988 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14989 /* 5714 always in PCIX mode */
14990 tp->dma_rwctrl |= 0x00148000;
14992 tp->dma_rwctrl |= 0x001b000f;
14996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14998 tp->dma_rwctrl &= 0xfffffff0;
15000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15002 /* Remove this if it causes problems for some boards. */
15003 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15005 /* On 5700/5701 chips, we need to set this bit.
15006 * Otherwise the chip will issue cacheline transactions
15007 * to streamable DMA memory with not all the byte
15008 * enables turned on. This is an error on several
15009 * RISC PCI controllers, in particular sparc64.
15011 * On 5703/5704 chips, this bit has been reassigned
15012 * a different meaning. In particular, it is used
15013 * on those chips to enable a PCI-X workaround.
15015 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15018 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15021 /* Unneeded, already done by tg3_get_invariants. */
15022 tg3_switch_clocks(tp);
15025 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15026 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15029 /* It is best to perform the DMA test with maximum write burst size
15030 * to expose the 5700/5701 write DMA bug.
15032 saved_dma_rwctrl = tp->dma_rwctrl;
15033 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15034 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15039 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15042 /* Send the buffer to the chip. */
15043 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15045 dev_err(&tp->pdev->dev,
15046 "%s: Buffer write failed. err = %d\n",
15052 /* validate data reached card RAM correctly. */
15053 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15055 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15056 if (le32_to_cpu(val) != p[i]) {
15057 dev_err(&tp->pdev->dev,
15058 "%s: Buffer corrupted on device! "
15059 "(%d != %d)\n", __func__, val, i);
15060 /* ret = -ENODEV here? */
15065 /* Now read it back. */
15066 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15068 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15069 "err = %d\n", __func__, ret);
15074 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15078 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15079 DMA_RWCTRL_WRITE_BNDRY_16) {
15080 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15081 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15082 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15085 dev_err(&tp->pdev->dev,
15086 "%s: Buffer corrupted on read back! "
15087 "(%d != %d)\n", __func__, p[i], i);
15093 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15099 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15100 DMA_RWCTRL_WRITE_BNDRY_16) {
15101 /* DMA test passed without adjusting the DMA boundary;
15102 * now look for chipsets that are known to expose the
15103 * DMA bug without failing the test.
15105 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15106 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15107 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15109 /* Safe to use the calculated DMA boundary. */
15110 tp->dma_rwctrl = saved_dma_rwctrl;
15113 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15117 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15122 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15124 if (tg3_flag(tp, 57765_PLUS)) {
15125 tp->bufmgr_config.mbuf_read_dma_low_water =
15126 DEFAULT_MB_RDMA_LOW_WATER_5705;
15127 tp->bufmgr_config.mbuf_mac_rx_low_water =
15128 DEFAULT_MB_MACRX_LOW_WATER_57765;
15129 tp->bufmgr_config.mbuf_high_water =
15130 DEFAULT_MB_HIGH_WATER_57765;
15132 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15133 DEFAULT_MB_RDMA_LOW_WATER_5705;
15134 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15135 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15136 tp->bufmgr_config.mbuf_high_water_jumbo =
15137 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15138 } else if (tg3_flag(tp, 5705_PLUS)) {
15139 tp->bufmgr_config.mbuf_read_dma_low_water =
15140 DEFAULT_MB_RDMA_LOW_WATER_5705;
15141 tp->bufmgr_config.mbuf_mac_rx_low_water =
15142 DEFAULT_MB_MACRX_LOW_WATER_5705;
15143 tp->bufmgr_config.mbuf_high_water =
15144 DEFAULT_MB_HIGH_WATER_5705;
15145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15146 tp->bufmgr_config.mbuf_mac_rx_low_water =
15147 DEFAULT_MB_MACRX_LOW_WATER_5906;
15148 tp->bufmgr_config.mbuf_high_water =
15149 DEFAULT_MB_HIGH_WATER_5906;
15152 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15153 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15154 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15155 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15156 tp->bufmgr_config.mbuf_high_water_jumbo =
15157 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15159 tp->bufmgr_config.mbuf_read_dma_low_water =
15160 DEFAULT_MB_RDMA_LOW_WATER;
15161 tp->bufmgr_config.mbuf_mac_rx_low_water =
15162 DEFAULT_MB_MACRX_LOW_WATER;
15163 tp->bufmgr_config.mbuf_high_water =
15164 DEFAULT_MB_HIGH_WATER;
15166 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15167 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15168 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15169 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15170 tp->bufmgr_config.mbuf_high_water_jumbo =
15171 DEFAULT_MB_HIGH_WATER_JUMBO;
15174 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15175 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15178 static char * __devinit tg3_phy_string(struct tg3 *tp)
15180 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15181 case TG3_PHY_ID_BCM5400: return "5400";
15182 case TG3_PHY_ID_BCM5401: return "5401";
15183 case TG3_PHY_ID_BCM5411: return "5411";
15184 case TG3_PHY_ID_BCM5701: return "5701";
15185 case TG3_PHY_ID_BCM5703: return "5703";
15186 case TG3_PHY_ID_BCM5704: return "5704";
15187 case TG3_PHY_ID_BCM5705: return "5705";
15188 case TG3_PHY_ID_BCM5750: return "5750";
15189 case TG3_PHY_ID_BCM5752: return "5752";
15190 case TG3_PHY_ID_BCM5714: return "5714";
15191 case TG3_PHY_ID_BCM5780: return "5780";
15192 case TG3_PHY_ID_BCM5755: return "5755";
15193 case TG3_PHY_ID_BCM5787: return "5787";
15194 case TG3_PHY_ID_BCM5784: return "5784";
15195 case TG3_PHY_ID_BCM5756: return "5722/5756";
15196 case TG3_PHY_ID_BCM5906: return "5906";
15197 case TG3_PHY_ID_BCM5761: return "5761";
15198 case TG3_PHY_ID_BCM5718C: return "5718C";
15199 case TG3_PHY_ID_BCM5718S: return "5718S";
15200 case TG3_PHY_ID_BCM57765: return "57765";
15201 case TG3_PHY_ID_BCM5719C: return "5719C";
15202 case TG3_PHY_ID_BCM5720C: return "5720C";
15203 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15204 case 0: return "serdes";
15205 default: return "unknown";
15209 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15211 if (tg3_flag(tp, PCI_EXPRESS)) {
15212 strcpy(str, "PCI Express");
15214 } else if (tg3_flag(tp, PCIX_MODE)) {
15215 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15217 strcpy(str, "PCIX:");
15219 if ((clock_ctrl == 7) ||
15220 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15221 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15222 strcat(str, "133MHz");
15223 else if (clock_ctrl == 0)
15224 strcat(str, "33MHz");
15225 else if (clock_ctrl == 2)
15226 strcat(str, "50MHz");
15227 else if (clock_ctrl == 4)
15228 strcat(str, "66MHz");
15229 else if (clock_ctrl == 6)
15230 strcat(str, "100MHz");
15232 strcpy(str, "PCI:");
15233 if (tg3_flag(tp, PCI_HIGH_SPEED))
15234 strcat(str, "66MHz");
15236 strcat(str, "33MHz");
15238 if (tg3_flag(tp, PCI_32BIT))
15239 strcat(str, ":32-bit");
15241 strcat(str, ":64-bit");
15245 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15247 struct pci_dev *peer;
15248 unsigned int func, devnr = tp->pdev->devfn & ~7;
15250 for (func = 0; func < 8; func++) {
15251 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15252 if (peer && peer != tp->pdev)
15256 /* 5704 can be configured in single-port mode, set peer to
15257 * tp->pdev in that case.
15265 * We don't need to keep the refcount elevated; there's no way
15266 * to remove one half of this device without removing the other
15273 static void __devinit tg3_init_coal(struct tg3 *tp)
15275 struct ethtool_coalesce *ec = &tp->coal;
15277 memset(ec, 0, sizeof(*ec));
15278 ec->cmd = ETHTOOL_GCOALESCE;
15279 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15280 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15281 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15282 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15283 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15284 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15285 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15286 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15287 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15289 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15290 HOSTCC_MODE_CLRTICK_TXBD)) {
15291 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15292 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15293 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15294 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15297 if (tg3_flag(tp, 5705_PLUS)) {
15298 ec->rx_coalesce_usecs_irq = 0;
15299 ec->tx_coalesce_usecs_irq = 0;
15300 ec->stats_block_coalesce_usecs = 0;
15304 static const struct net_device_ops tg3_netdev_ops = {
15305 .ndo_open = tg3_open,
15306 .ndo_stop = tg3_close,
15307 .ndo_start_xmit = tg3_start_xmit,
15308 .ndo_get_stats64 = tg3_get_stats64,
15309 .ndo_validate_addr = eth_validate_addr,
15310 .ndo_set_rx_mode = tg3_set_rx_mode,
15311 .ndo_set_mac_address = tg3_set_mac_addr,
15312 .ndo_do_ioctl = tg3_ioctl,
15313 .ndo_tx_timeout = tg3_tx_timeout,
15314 .ndo_change_mtu = tg3_change_mtu,
15315 .ndo_fix_features = tg3_fix_features,
15316 .ndo_set_features = tg3_set_features,
15317 #ifdef CONFIG_NET_POLL_CONTROLLER
15318 .ndo_poll_controller = tg3_poll_controller,
15322 static int __devinit tg3_init_one(struct pci_dev *pdev,
15323 const struct pci_device_id *ent)
15325 struct net_device *dev;
15327 int i, err, pm_cap;
15328 u32 sndmbx, rcvmbx, intmbx;
15330 u64 dma_mask, persist_dma_mask;
15331 netdev_features_t features = 0;
15333 printk_once(KERN_INFO "%s\n", version);
15335 err = pci_enable_device(pdev);
15337 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15341 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15343 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15344 goto err_out_disable_pdev;
15347 pci_set_master(pdev);
15349 /* Find power-management capability. */
15350 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15352 dev_err(&pdev->dev,
15353 "Cannot find Power Management capability, aborting\n");
15355 goto err_out_free_res;
15358 err = pci_set_power_state(pdev, PCI_D0);
15360 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15361 goto err_out_free_res;
15364 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15366 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15368 goto err_out_power_down;
15371 SET_NETDEV_DEV(dev, &pdev->dev);
15373 tp = netdev_priv(dev);
15376 tp->pm_cap = pm_cap;
15377 tp->rx_mode = TG3_DEF_RX_MODE;
15378 tp->tx_mode = TG3_DEF_TX_MODE;
15381 tp->msg_enable = tg3_debug;
15383 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15385 /* The word/byte swap controls here control register access byte
15386 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15389 tp->misc_host_ctrl =
15390 MISC_HOST_CTRL_MASK_PCI_INT |
15391 MISC_HOST_CTRL_WORD_SWAP |
15392 MISC_HOST_CTRL_INDIR_ACCESS |
15393 MISC_HOST_CTRL_PCISTATE_RW;
15395 /* The NONFRM (non-frame) byte/word swap controls take effect
15396 * on descriptor entries, anything which isn't packet data.
15398 * The StrongARM chips on the board (one for tx, one for rx)
15399 * are running in big-endian mode.
15401 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15402 GRC_MODE_WSWAP_NONFRM_DATA);
15403 #ifdef __BIG_ENDIAN
15404 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15406 spin_lock_init(&tp->lock);
15407 spin_lock_init(&tp->indirect_lock);
15408 INIT_WORK(&tp->reset_task, tg3_reset_task);
15410 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15412 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15414 goto err_out_free_dev;
15417 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15418 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15419 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15421 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15425 tg3_flag_set(tp, ENABLE_APE);
15426 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15427 if (!tp->aperegs) {
15428 dev_err(&pdev->dev,
15429 "Cannot map APE registers, aborting\n");
15431 goto err_out_iounmap;
15435 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15436 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15438 dev->ethtool_ops = &tg3_ethtool_ops;
15439 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15440 dev->netdev_ops = &tg3_netdev_ops;
15441 dev->irq = pdev->irq;
15443 err = tg3_get_invariants(tp);
15445 dev_err(&pdev->dev,
15446 "Problem fetching invariants of chip, aborting\n");
15447 goto err_out_apeunmap;
15450 /* The EPB bridge inside 5714, 5715, and 5780 and any
15451 * device behind the EPB cannot support DMA addresses > 40-bit.
15452 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15453 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15454 * do DMA address check in tg3_start_xmit().
15456 if (tg3_flag(tp, IS_5788))
15457 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15458 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15459 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15460 #ifdef CONFIG_HIGHMEM
15461 dma_mask = DMA_BIT_MASK(64);
15464 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15466 /* Configure DMA attributes. */
15467 if (dma_mask > DMA_BIT_MASK(32)) {
15468 err = pci_set_dma_mask(pdev, dma_mask);
15470 features |= NETIF_F_HIGHDMA;
15471 err = pci_set_consistent_dma_mask(pdev,
15474 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15475 "DMA for consistent allocations\n");
15476 goto err_out_apeunmap;
15480 if (err || dma_mask == DMA_BIT_MASK(32)) {
15481 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15483 dev_err(&pdev->dev,
15484 "No usable DMA configuration, aborting\n");
15485 goto err_out_apeunmap;
15489 tg3_init_bufmgr_config(tp);
15491 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15493 /* 5700 B0 chips do not support checksumming correctly due
15494 * to hardware bugs.
15496 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15497 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15499 if (tg3_flag(tp, 5755_PLUS))
15500 features |= NETIF_F_IPV6_CSUM;
15503 /* TSO is on by default on chips that support hardware TSO.
15504 * Firmware TSO on older chips gives lower performance, so it
15505 * is off by default, but can be enabled using ethtool.
15507 if ((tg3_flag(tp, HW_TSO_1) ||
15508 tg3_flag(tp, HW_TSO_2) ||
15509 tg3_flag(tp, HW_TSO_3)) &&
15510 (features & NETIF_F_IP_CSUM))
15511 features |= NETIF_F_TSO;
15512 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15513 if (features & NETIF_F_IPV6_CSUM)
15514 features |= NETIF_F_TSO6;
15515 if (tg3_flag(tp, HW_TSO_3) ||
15516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15517 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15518 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15521 features |= NETIF_F_TSO_ECN;
15524 dev->features |= features;
15525 dev->vlan_features |= features;
15528 * Add loopback capability only for a subset of devices that support
15529 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15530 * loopback for the remaining devices.
15532 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15533 !tg3_flag(tp, CPMU_PRESENT))
15534 /* Add the loopback capability */
15535 features |= NETIF_F_LOOPBACK;
	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down.  Otherwise the DMA self test below will enable WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
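	/* Assign an interrupt mailbox plus RX consumer / TX producer mailboxes
	 * to each NAPI context.  Vector 0 keeps the offsets computed for
	 * single-interrupt operation; later vectors reuse and advance them.
	 */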
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;
		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}
	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
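/* System sleep hooks: tg3_suspend() quiesces the NIC and prepares it for
 * low power, tg3_resume() brings the hardware back up on wakeup.  They are
 * wired into the PCI core through SIMPLE_DEV_PM_OPS / tg3_driver.driver.pm
 * further below.
 */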
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
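/* PCI error recovery (AER) callbacks.  The PCI core invokes error_detected()
 * when a bus error is seen, slot_reset() after the slot has been reset, and
 * resume() once traffic is allowed to flow again.
 */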
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);