2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
/* NOTE(review): TG3_MAJ_NUM was lost in extraction; 3 is the historical
 * major number used with minor 130 — confirm against upstream.
 */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register access shorthands; all expect a local `tp` in scope. */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg, val)		tp->write32(tp, reg, val)
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1536 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1648 if (!tg3_readphy(tp, MII_BMCR, ®))
1650 if (!tg3_readphy(tp, MII_BMSR, ®))
1651 val |= (reg & 0xffff);
1655 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1657 if (!tg3_readphy(tp, MII_LPA, ®))
1658 val |= (reg & 0xffff);
1662 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1665 if (!tg3_readphy(tp, MII_STAT1000, ®))
1666 val |= (reg & 0xffff);
1670 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1881 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882 miireg = ADVERTISE_1000XPAUSE;
1883 else if (flow_ctrl & FLOW_CTRL_TX)
1884 miireg = ADVERTISE_1000XPSE_ASYM;
1885 else if (flow_ctrl & FLOW_CTRL_RX)
1886 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1897 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900 if (lcladv & ADVERTISE_1000XPAUSE)
1902 if (rmtadv & ADVERTISE_1000XPAUSE)
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1913 u32 old_rx_mode = tp->rx_mode;
1914 u32 old_tx_mode = tp->tx_mode;
1916 if (tg3_flag(tp, USE_PHYLIB))
1917 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1919 autoneg = tp->link_config.autoneg;
1921 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1925 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1927 flowctrl = tp->link_config.flowctrl;
1929 tp->link_config.active_flowctrl = flowctrl;
1931 if (flowctrl & FLOW_CTRL_RX)
1932 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1934 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1936 if (old_rx_mode != tp->rx_mode)
1937 tw32_f(MAC_RX_MODE, tp->rx_mode);
1939 if (flowctrl & FLOW_CTRL_TX)
1940 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1942 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1944 if (old_tx_mode != tp->tx_mode)
1945 tw32_f(MAC_TX_MODE, tp->tx_mode);
1948 static void tg3_adjust_link(struct net_device *dev)
1950 u8 oldflowctrl, linkmesg = 0;
1951 u32 mac_mode, lcl_adv, rmt_adv;
1952 struct tg3 *tp = netdev_priv(dev);
1953 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1955 spin_lock_bh(&tp->lock);
1957 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958 MAC_MODE_HALF_DUPLEX);
1960 oldflowctrl = tp->link_config.active_flowctrl;
1966 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967 mac_mode |= MAC_MODE_PORT_MODE_MII;
1968 else if (phydev->speed == SPEED_1000 ||
1969 tg3_asic_rev(tp) != ASIC_REV_5785)
1970 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1972 mac_mode |= MAC_MODE_PORT_MODE_MII;
1974 if (phydev->duplex == DUPLEX_HALF)
1975 mac_mode |= MAC_MODE_HALF_DUPLEX;
1977 lcl_adv = mii_advertise_flowctrl(
1978 tp->link_config.flowctrl);
1981 rmt_adv = LPA_PAUSE_CAP;
1982 if (phydev->asym_pause)
1983 rmt_adv |= LPA_PAUSE_ASYM;
1986 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1988 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1990 if (mac_mode != tp->mac_mode) {
1991 tp->mac_mode = mac_mode;
1992 tw32_f(MAC_MODE, tp->mac_mode);
1996 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997 if (phydev->speed == SPEED_10)
1999 MAC_MI_STAT_10MBPS_MODE |
2000 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2002 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2005 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006 tw32(MAC_TX_LENGTHS,
2007 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008 (6 << TX_LENGTHS_IPG_SHIFT) |
2009 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2011 tw32(MAC_TX_LENGTHS,
2012 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013 (6 << TX_LENGTHS_IPG_SHIFT) |
2014 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2016 if (phydev->link != tp->old_link ||
2017 phydev->speed != tp->link_config.active_speed ||
2018 phydev->duplex != tp->link_config.active_duplex ||
2019 oldflowctrl != tp->link_config.active_flowctrl)
2022 tp->old_link = phydev->link;
2023 tp->link_config.active_speed = phydev->speed;
2024 tp->link_config.active_duplex = phydev->duplex;
2026 spin_unlock_bh(&tp->lock);
2029 tg3_link_report(tp);
2032 static int tg3_phy_init(struct tg3 *tp)
2034 struct phy_device *phydev;
2036 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2039 /* Bring the PHY back to a known state. */
2042 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2044 /* Attach the MAC to the PHY. */
2045 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046 tg3_adjust_link, phydev->interface);
2047 if (IS_ERR(phydev)) {
2048 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049 return PTR_ERR(phydev);
2052 /* Mask with MAC supported features. */
2053 switch (phydev->interface) {
2054 case PHY_INTERFACE_MODE_GMII:
2055 case PHY_INTERFACE_MODE_RGMII:
2056 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057 phydev->supported &= (PHY_GBIT_FEATURES |
2059 SUPPORTED_Asym_Pause);
2063 case PHY_INTERFACE_MODE_MII:
2064 phydev->supported &= (PHY_BASIC_FEATURES |
2066 SUPPORTED_Asym_Pause);
2069 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2075 phydev->advertising = phydev->supported;
2080 static void tg3_phy_start(struct tg3 *tp)
2082 struct phy_device *phydev;
2084 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2087 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2089 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091 phydev->speed = tp->link_config.speed;
2092 phydev->duplex = tp->link_config.duplex;
2093 phydev->autoneg = tp->link_config.autoneg;
2094 phydev->advertising = tp->link_config.advertising;
2099 phy_start_aneg(phydev);
2102 static void tg3_phy_stop(struct tg3 *tp)
2104 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2107 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2110 static void tg3_phy_fini(struct tg3 *tp)
2112 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2123 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2126 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127 /* Cannot do read-modify-write on 5401 */
2128 err = tg3_phy_auxctl_write(tp,
2129 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2135 err = tg3_phy_auxctl_read(tp,
2136 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2140 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141 err = tg3_phy_auxctl_write(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2152 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2155 tg3_writephy(tp, MII_TG3_FET_TEST,
2156 phytest | MII_TG3_FET_SHADOW_EN);
2157 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2159 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2161 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2164 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2172 if (!tg3_flag(tp, 5705_PLUS) ||
2173 (tg3_flag(tp, 5717_PLUS) &&
2174 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2177 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178 tg3_phy_fet_toggle_apd(tp, enable);
2182 reg = MII_TG3_MISC_SHDW_WREN |
2183 MII_TG3_MISC_SHDW_SCR5_SEL |
2184 MII_TG3_MISC_SHDW_SCR5_LPED |
2185 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186 MII_TG3_MISC_SHDW_SCR5_SDTL |
2187 MII_TG3_MISC_SHDW_SCR5_C125OE;
2188 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2191 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2194 reg = MII_TG3_MISC_SHDW_WREN |
2195 MII_TG3_MISC_SHDW_APD_SEL |
2196 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2198 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2200 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2207 if (!tg3_flag(tp, 5705_PLUS) ||
2208 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2211 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2214 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2217 tg3_writephy(tp, MII_TG3_FET_TEST,
2218 ephy | MII_TG3_FET_SHADOW_EN);
2219 if (!tg3_readphy(tp, reg, &phy)) {
2221 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2223 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224 tg3_writephy(tp, reg, phy);
2226 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2231 ret = tg3_phy_auxctl_read(tp,
2232 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2235 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2237 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238 tg3_phy_auxctl_write(tp,
2239 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2249 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2252 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2254 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2267 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2270 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2274 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2278 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2282 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2285 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2288 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2292 tg3_phy_toggle_auxctl_smdsp(tp, false);
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2299 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2304 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305 current_link_up == 1 &&
2306 tp->link_config.active_duplex == DUPLEX_FULL &&
2307 (tp->link_config.active_speed == SPEED_100 ||
2308 tp->link_config.active_speed == SPEED_1000)) {
2311 if (tp->link_config.active_speed == SPEED_1000)
2312 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2314 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2316 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2318 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319 TG3_CL45_D7_EEERES_STAT, &val);
2321 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2326 if (!tp->setlpicnt) {
2327 if (current_link_up == 1 &&
2328 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330 tg3_phy_toggle_auxctl_smdsp(tp, false);
2333 val = tr32(TG3_CPMU_EEE_MODE);
2334 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2342 if (tp->link_config.active_speed == SPEED_1000 &&
2343 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345 tg3_flag(tp, 57765_CLASS)) &&
2346 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347 val = MII_TG3_DSP_TAP26_ALNOKO |
2348 MII_TG3_DSP_TAP26_RMRXSTO;
2349 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350 tg3_phy_toggle_auxctl_smdsp(tp, false);
2353 val = tr32(TG3_CPMU_EEE_MODE);
2354 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2364 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365 if ((tmp32 & 0x1000) == 0)
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2377 static const u32 test_pat[4][6] = {
2378 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2385 for (chan = 0; chan < 4; chan++) {
2388 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389 (chan * 0x2000) | 0x0200);
2390 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2392 for (i = 0; i < 6; i++)
2393 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2396 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397 if (tg3_wait_macro_done(tp)) {
2402 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403 (chan * 0x2000) | 0x0200);
2404 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405 if (tg3_wait_macro_done(tp)) {
2410 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411 if (tg3_wait_macro_done(tp)) {
2416 for (i = 0; i < 6; i += 2) {
2419 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421 tg3_wait_macro_done(tp)) {
2427 if (low != test_pat[chan][i] ||
2428 high != test_pat[chan][i+1]) {
2429 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2445 for (chan = 0; chan < 4; chan++) {
2448 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449 (chan * 0x2000) | 0x0200);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451 for (i = 0; i < 6; i++)
2452 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454 if (tg3_wait_macro_done(tp))
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2463 u32 reg32, phy9_orig;
2464 int retries, do_phy_reset, err;
2470 err = tg3_bmcr_reset(tp);
2476 /* Disable transmitter and interrupt. */
2477 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2481 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2483 /* Set full-duplex, 1000 mbps. */
2484 tg3_writephy(tp, MII_BMCR,
2485 BMCR_FULLDPLX | BMCR_SPEED1000);
2487 /* Set to master mode. */
2488 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2491 tg3_writephy(tp, MII_CTRL1000,
2492 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2494 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2498 /* Block the PHY control access. */
2499 tg3_phydsp_write(tp, 0x8005, 0x0800);
2501 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2504 } while (--retries);
2506 err = tg3_phy_reset_chanpat(tp);
2510 tg3_phydsp_write(tp, 0x8005, 0x0000);
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2515 tg3_phy_toggle_auxctl_smdsp(tp, false);
2517 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2519 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2521 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2528 static void tg3_carrier_off(struct tg3 *tp)
2530 netif_carrier_off(tp->dev);
2531 tp->link_up = false;
2534 /* This will reset the tigon3 PHY if there is no valid
2535 * link unless the FORCE argument is non-zero.
2537 static int tg3_phy_reset(struct tg3 *tp)
2542 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2543 val = tr32(GRC_MISC_CFG);
2544 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2547 err = tg3_readphy(tp, MII_BMSR, &val);
2548 err |= tg3_readphy(tp, MII_BMSR, &val);
2552 if (netif_running(tp->dev) && tp->link_up) {
2553 netif_carrier_off(tp->dev);
2554 tg3_link_report(tp);
2557 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2558 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2559 tg3_asic_rev(tp) == ASIC_REV_5705) {
2560 err = tg3_phy_reset_5703_4_5(tp);
2567 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2568 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2569 cpmuctrl = tr32(TG3_CPMU_CTRL);
2570 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2572 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2575 err = tg3_bmcr_reset(tp);
2579 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2580 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2581 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2583 tw32(TG3_CPMU_CTRL, cpmuctrl);
2586 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2587 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2588 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2589 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2590 CPMU_LSPD_1000MB_MACCLK_12_5) {
2591 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2593 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 if (tg3_flag(tp, 5717_PLUS) &&
2598 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2601 tg3_phy_apply_otp(tp);
2603 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2604 tg3_phy_toggle_apd(tp, true);
2606 tg3_phy_toggle_apd(tp, false);
2609 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2610 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2611 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2612 tg3_phydsp_write(tp, 0x000a, 0x0323);
2613 tg3_phy_toggle_auxctl_smdsp(tp, false);
2616 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2617 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2618 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2622 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2623 tg3_phydsp_write(tp, 0x000a, 0x310b);
2624 tg3_phydsp_write(tp, 0x201f, 0x9506);
2625 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2626 tg3_phy_toggle_auxctl_smdsp(tp, false);
2628 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2629 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2631 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2632 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2633 tg3_writephy(tp, MII_TG3_TEST1,
2634 MII_TG3_TEST1_TRIM_EN | 0x4);
2636 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2638 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 /* Set Extended packet length bit (bit 14) on all chips that */
2643 /* support jumbo frames */
2644 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2645 /* Cannot do read-modify-write on 5401 */
2646 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2647 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2648 /* Set bit 14 with read-modify-write to preserve other bits */
2649 err = tg3_phy_auxctl_read(tp,
2650 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2652 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2653 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2656 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2657 * jumbo frames transmission.
2659 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2660 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2661 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2662 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2665 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2666 /* adjust output voltage */
2667 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2670 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2671 tg3_phydsp_write(tp, 0xffb, 0x4000);
2673 tg3_phy_toggle_automdix(tp, 1);
2674 tg3_phy_set_wirespeed(tp);
2678 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2679 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2680 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2681 TG3_GPIO_MSG_NEED_VAUX)
2682 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2683 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2684 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2685 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2686 (TG3_GPIO_MSG_DRVR_PRES << 12))
2688 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2689 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2690 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2691 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2692 (TG3_GPIO_MSG_NEED_VAUX << 12))
2694 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2699 tg3_asic_rev(tp) == ASIC_REV_5719)
2700 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2702 status = tr32(TG3_CPMU_DRV_STATUS);
2704 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2705 status &= ~(TG3_GPIO_MSG_MASK << shift);
2706 status |= (newstat << shift);
2708 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2709 tg3_asic_rev(tp) == ASIC_REV_5719)
2710 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2712 tw32(TG3_CPMU_DRV_STATUS, status);
2714 return status >> TG3_APE_GPIO_MSG_SHIFT;
2717 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2719 if (!tg3_flag(tp, IS_NIC))
2722 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2723 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2724 tg3_asic_rev(tp) == ASIC_REV_5720) {
2725 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2728 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2730 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2731 TG3_GRC_LCLCTL_PWRSW_DELAY);
2733 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2735 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2736 TG3_GRC_LCLCTL_PWRSW_DELAY);
2742 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 if (!tg3_flag(tp, IS_NIC) ||
2747 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2748 tg3_asic_rev(tp) == ASIC_REV_5701)
2751 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2753 tw32_wait_f(GRC_LOCAL_CTRL,
2754 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2755 TG3_GRC_LCLCTL_PWRSW_DELAY);
2757 tw32_wait_f(GRC_LOCAL_CTRL,
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2761 tw32_wait_f(GRC_LOCAL_CTRL,
2762 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2763 TG3_GRC_LCLCTL_PWRSW_DELAY);
2766 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2768 if (!tg3_flag(tp, IS_NIC))
2771 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2772 tg3_asic_rev(tp) == ASIC_REV_5701) {
2773 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2774 (GRC_LCLCTRL_GPIO_OE0 |
2775 GRC_LCLCTRL_GPIO_OE1 |
2776 GRC_LCLCTRL_GPIO_OE2 |
2777 GRC_LCLCTRL_GPIO_OUTPUT0 |
2778 GRC_LCLCTRL_GPIO_OUTPUT1),
2779 TG3_GRC_LCLCTL_PWRSW_DELAY);
2780 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2782 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2783 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2784 GRC_LCLCTRL_GPIO_OE1 |
2785 GRC_LCLCTRL_GPIO_OE2 |
2786 GRC_LCLCTRL_GPIO_OUTPUT0 |
2787 GRC_LCLCTRL_GPIO_OUTPUT1 |
2789 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2793 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2797 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2801 u32 grc_local_ctrl = 0;
2803 /* Workaround to prevent overdrawing Amps. */
2804 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2805 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2806 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2808 TG3_GRC_LCLCTL_PWRSW_DELAY);
2811 /* On 5753 and variants, GPIO2 cannot be used. */
2812 no_gpio2 = tp->nic_sram_data_cfg &
2813 NIC_SRAM_DATA_CFG_NO_GPIO2;
2815 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2816 GRC_LCLCTRL_GPIO_OE1 |
2817 GRC_LCLCTRL_GPIO_OE2 |
2818 GRC_LCLCTRL_GPIO_OUTPUT1 |
2819 GRC_LCLCTRL_GPIO_OUTPUT2;
2821 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2);
2824 tw32_wait_f(GRC_LOCAL_CTRL,
2825 tp->grc_local_ctrl | grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2830 tw32_wait_f(GRC_LOCAL_CTRL,
2831 tp->grc_local_ctrl | grc_local_ctrl,
2832 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2836 tw32_wait_f(GRC_LOCAL_CTRL,
2837 tp->grc_local_ctrl | grc_local_ctrl,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 /* Serialize power state transitions */
2848 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2851 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2852 msg = TG3_GPIO_MSG_NEED_VAUX;
2854 msg = tg3_set_function_status(tp, msg);
2856 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2859 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2860 tg3_pwrsrc_switch_to_vaux(tp);
2862 tg3_pwrsrc_die_with_vmain(tp);
2865 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2868 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2870 bool need_vaux = false;
2872 /* The GPIOs do something completely different on 57765. */
2873 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2876 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2877 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2878 tg3_asic_rev(tp) == ASIC_REV_5720) {
2879 tg3_frob_aux_power_5717(tp, include_wol ?
2880 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2884 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2885 struct net_device *dev_peer;
2887 dev_peer = pci_get_drvdata(tp->pdev_peer);
2889 /* remove_one() may have been run on the peer. */
2891 struct tg3 *tp_peer = netdev_priv(dev_peer);
2893 if (tg3_flag(tp_peer, INIT_COMPLETE))
2896 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2897 tg3_flag(tp_peer, ENABLE_ASF))
2902 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2903 tg3_flag(tp, ENABLE_ASF))
2907 tg3_pwrsrc_switch_to_vaux(tp);
2909 tg3_pwrsrc_die_with_vmain(tp);
2912 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2914 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2916 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2917 if (speed != SPEED_10)
2919 } else if (speed == SPEED_10)
2925 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2930 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2931 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2932 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2935 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2936 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2937 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2942 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2944 val = tr32(GRC_MISC_CFG);
2945 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2948 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2950 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2953 tg3_writephy(tp, MII_ADVERTISE, 0);
2954 tg3_writephy(tp, MII_BMCR,
2955 BMCR_ANENABLE | BMCR_ANRESTART);
2957 tg3_writephy(tp, MII_TG3_FET_TEST,
2958 phytest | MII_TG3_FET_SHADOW_EN);
2959 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2960 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2962 MII_TG3_FET_SHDW_AUXMODE4,
2965 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2968 } else if (do_low_power) {
2969 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2970 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2972 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2973 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2974 MII_TG3_AUXCTL_PCTL_VREG_11V;
2975 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2978 /* The PHY should not be powered down on some chips because
2981 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2982 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2983 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2984 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2985 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2990 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2991 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2992 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2993 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2994 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2997 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3000 /* tp->lock is held. */
3001 static int tg3_nvram_lock(struct tg3 *tp)
3003 if (tg3_flag(tp, NVRAM)) {
3006 if (tp->nvram_lock_cnt == 0) {
3007 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3008 for (i = 0; i < 8000; i++) {
3009 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3014 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 tp->nvram_lock_cnt++;
3023 /* tp->lock is held. */
3024 static void tg3_nvram_unlock(struct tg3 *tp)
3026 if (tg3_flag(tp, NVRAM)) {
3027 if (tp->nvram_lock_cnt > 0)
3028 tp->nvram_lock_cnt--;
3029 if (tp->nvram_lock_cnt == 0)
3030 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 /* tp->lock is held. */
3035 static void tg3_enable_nvram_access(struct tg3 *tp)
3037 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3038 u32 nvaccess = tr32(NVRAM_ACCESS);
3040 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 /* tp->lock is held. */
3045 static void tg3_disable_nvram_access(struct tg3 *tp)
3047 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048 u32 nvaccess = tr32(NVRAM_ACCESS);
3050 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3055 u32 offset, u32 *val)
3060 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3063 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3064 EEPROM_ADDR_DEVID_MASK |
3066 tw32(GRC_EEPROM_ADDR,
3068 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3069 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3070 EEPROM_ADDR_ADDR_MASK) |
3071 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3073 for (i = 0; i < 1000; i++) {
3074 tmp = tr32(GRC_EEPROM_ADDR);
3076 if (tmp & EEPROM_ADDR_COMPLETE)
3080 if (!(tmp & EEPROM_ADDR_COMPLETE))
3083 tmp = tr32(GRC_EEPROM_DATA);
3086 * The data will always be opposite the native endian
3087 * format. Perform a blind byteswap to compensate.
3094 #define NVRAM_CMD_TIMEOUT 10000
3096 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 tw32(NVRAM_CMD, nvram_cmd);
3101 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3103 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3109 if (i == NVRAM_CMD_TIMEOUT)
3115 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3117 if (tg3_flag(tp, NVRAM) &&
3118 tg3_flag(tp, NVRAM_BUFFERED) &&
3119 tg3_flag(tp, FLASH) &&
3120 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3121 (tp->nvram_jedecnum == JEDEC_ATMEL))
3123 addr = ((addr / tp->nvram_pagesize) <<
3124 ATMEL_AT45DB0X1B_PAGE_POS) +
3125 (addr % tp->nvram_pagesize);
3130 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3132 if (tg3_flag(tp, NVRAM) &&
3133 tg3_flag(tp, NVRAM_BUFFERED) &&
3134 tg3_flag(tp, FLASH) &&
3135 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3136 (tp->nvram_jedecnum == JEDEC_ATMEL))
3138 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3139 tp->nvram_pagesize) +
3140 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3145 /* NOTE: Data read in from NVRAM is byteswapped according to
3146 * the byteswapping settings for all other register accesses.
3147 * tg3 devices are BE devices, so on a BE machine, the data
3148 * returned will be exactly as it is seen in NVRAM. On a LE
3149 * machine, the 32-bit value will be byteswapped.
3151 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 if (!tg3_flag(tp, NVRAM))
3156 return tg3_nvram_read_using_eeprom(tp, offset, val);
3158 offset = tg3_nvram_phys_addr(tp, offset);
3160 if (offset > NVRAM_ADDR_MSK)
3163 ret = tg3_nvram_lock(tp);
3167 tg3_enable_nvram_access(tp);
3169 tw32(NVRAM_ADDR, offset);
3170 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3171 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3174 *val = tr32(NVRAM_RDDATA);
3176 tg3_disable_nvram_access(tp);
3178 tg3_nvram_unlock(tp);
3183 /* Ensures NVRAM data is in bytestream format. */
3184 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3187 int res = tg3_nvram_read(tp, offset, &v);
3189 *val = cpu_to_be32(v);
3193 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3194 u32 offset, u32 len, u8 *buf)
3199 for (i = 0; i < len; i += 4) {
3205 memcpy(&data, buf + i, 4);
3208 * The SEEPROM interface expects the data to always be opposite
3209 * the native endian format. We accomplish this by reversing
3210 * all the operations that would have been performed on the
3211 * data from a call to tg3_nvram_read_be32().
3213 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3215 val = tr32(GRC_EEPROM_ADDR);
3216 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3218 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3220 tw32(GRC_EEPROM_ADDR, val |
3221 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3222 (addr & EEPROM_ADDR_ADDR_MASK) |
3226 for (j = 0; j < 1000; j++) {
3227 val = tr32(GRC_EEPROM_ADDR);
3229 if (val & EEPROM_ADDR_COMPLETE)
3233 if (!(val & EEPROM_ADDR_COMPLETE)) {
3242 /* offset and length are dword aligned */
3243 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u32 pagesize = tp->nvram_pagesize;
3248 u32 pagemask = pagesize - 1;
3252 tmp = kmalloc(pagesize, GFP_KERNEL);
3258 u32 phy_addr, page_off, size;
3260 phy_addr = offset & ~pagemask;
3262 for (j = 0; j < pagesize; j += 4) {
3263 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3264 (__be32 *) (tmp + j));
3271 page_off = offset & pagemask;
3278 memcpy(tmp + page_off, buf, size);
3280 offset = offset + (pagesize - page_off);
3282 tg3_enable_nvram_access(tp);
3285 * Before we can erase the flash page, we need
3286 * to issue a special "write enable" command.
3288 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3290 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3293 /* Erase the target page */
3294 tw32(NVRAM_ADDR, phy_addr);
3296 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3297 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3299 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3302 /* Issue another write enable to start the write. */
3303 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3305 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3308 for (j = 0; j < pagesize; j += 4) {
3311 data = *((__be32 *) (tmp + j));
3313 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3315 tw32(NVRAM_ADDR, phy_addr + j);
3317 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 nvram_cmd |= NVRAM_CMD_FIRST;
3322 else if (j == (pagesize - 4))
3323 nvram_cmd |= NVRAM_CMD_LAST;
3325 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3333 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3334 tg3_nvram_exec_cmd(tp, nvram_cmd);
3341 /* offset and length are dword aligned */
3342 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3347 for (i = 0; i < len; i += 4, offset += 4) {
3348 u32 page_off, phy_addr, nvram_cmd;
3351 memcpy(&data, buf + i, 4);
3352 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3354 page_off = offset % tp->nvram_pagesize;
3356 phy_addr = tg3_nvram_phys_addr(tp, offset);
3358 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3360 if (page_off == 0 || i == 0)
3361 nvram_cmd |= NVRAM_CMD_FIRST;
3362 if (page_off == (tp->nvram_pagesize - 4))
3363 nvram_cmd |= NVRAM_CMD_LAST;
3366 nvram_cmd |= NVRAM_CMD_LAST;
3368 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3369 !tg3_flag(tp, FLASH) ||
3370 !tg3_flag(tp, 57765_PLUS))
3371 tw32(NVRAM_ADDR, phy_addr);
3373 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3374 !tg3_flag(tp, 5755_PLUS) &&
3375 (tp->nvram_jedecnum == JEDEC_ST) &&
3376 (nvram_cmd & NVRAM_CMD_FIRST)) {
3379 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3380 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (!tg3_flag(tp, FLASH)) {
3385 /* We always do complete word writes to eeprom. */
3386 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3389 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3396 /* offset and length are dword aligned */
3397 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3402 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3403 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 if (!tg3_flag(tp, NVRAM)) {
3408 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 ret = tg3_nvram_lock(tp);
3416 tg3_enable_nvram_access(tp);
3417 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3418 tw32(NVRAM_WRITE1, 0x406);
3420 grc_mode = tr32(GRC_MODE);
3421 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3423 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3424 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3427 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 grc_mode = tr32(GRC_MODE);
3432 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3434 tg3_disable_nvram_access(tp);
3435 tg3_nvram_unlock(tp);
3438 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3439 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3446 #define RX_CPU_SCRATCH_BASE 0x30000
3447 #define RX_CPU_SCRATCH_SIZE 0x04000
3448 #define TX_CPU_SCRATCH_BASE 0x34000
3449 #define TX_CPU_SCRATCH_SIZE 0x04000
3451 /* tp->lock is held. */
3452 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3455 const int iters = 10000;
3457 for (i = 0; i < iters; i++) {
3458 tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3460 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3464 return (i == iters) ? -EBUSY : 0;
3467 /* tp->lock is held. */
3468 static int tg3_rxcpu_pause(struct tg3 *tp)
3470 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3472 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3473 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3479 /* tp->lock is held. */
3480 static int tg3_txcpu_pause(struct tg3 *tp)
3482 return tg3_pause_cpu(tp, TX_CPU_BASE);
3485 /* tp->lock is held. */
3486 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3488 tw32(cpu_base + CPU_STATE, 0xffffffff);
3489 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3492 /* tp->lock is held. */
3493 static void tg3_rxcpu_resume(struct tg3 *tp)
3495 tg3_resume_cpu(tp, RX_CPU_BASE);
3498 /* tp->lock is held. */
3499 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3503 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3505 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3506 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3508 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3511 if (cpu_base == RX_CPU_BASE) {
3512 rc = tg3_rxcpu_pause(tp);
3515 * There is only an Rx CPU for the 5750 derivative in the
3518 if (tg3_flag(tp, IS_SSB_CORE))
3521 rc = tg3_txcpu_pause(tp);
3525 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3526 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3530 /* Clear firmware's nvram arbitration. */
3531 if (tg3_flag(tp, NVRAM))
3532 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3536 static int tg3_fw_data_len(struct tg3 *tp,
3537 const struct tg3_firmware_hdr *fw_hdr)
3541 /* Non fragmented firmware have one firmware header followed by a
3542 * contiguous chunk of data to be written. The length field in that
3543 * header is not the length of data to be written but the complete
3544 * length of the bss. The data length is determined based on
3545 * tp->fw->size minus headers.
3547 * Fragmented firmware have a main header followed by multiple
3548 * fragments. Each fragment is identical to non fragmented firmware
3549 * with a firmware header followed by a contiguous chunk of data. In
3550 * the main header, the length field is unused and set to 0xffffffff.
3551 * In each fragment header the length is the entire size of that
3552 * fragment i.e. fragment data + header length. Data length is
3553 * therefore length field in the header minus TG3_FW_HDR_LEN.
3555 if (tp->fw_len == 0xffffffff)
3556 fw_len = be32_to_cpu(fw_hdr->len);
3558 fw_len = tp->fw->size;
3560 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3563 /* tp->lock is held. */
3564 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3565 u32 cpu_scratch_base, int cpu_scratch_size,
3566 const struct tg3_firmware_hdr *fw_hdr)
3569 void (*write_op)(struct tg3 *, u32, u32);
3570 int total_len = tp->fw->size;
3572 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3574 "%s: Trying to load TX cpu firmware which is 5705\n",
3579 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3580 write_op = tg3_write_mem;
3582 write_op = tg3_write_indirect_reg32;
3584 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3585 /* It is possible that bootcode is still loading at this point.
3586 * Get the nvram lock first before halting the cpu.
3588 int lock_err = tg3_nvram_lock(tp);
3589 err = tg3_halt_cpu(tp, cpu_base);
3591 tg3_nvram_unlock(tp);
3595 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3596 write_op(tp, cpu_scratch_base + i, 0);
3597 tw32(cpu_base + CPU_STATE, 0xffffffff);
3598 tw32(cpu_base + CPU_MODE,
3599 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3601 /* Subtract additional main header for fragmented firmware and
3602 * advance to the first fragment
3604 total_len -= TG3_FW_HDR_LEN;
3609 u32 *fw_data = (u32 *)(fw_hdr + 1);
3610 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3611 write_op(tp, cpu_scratch_base +
3612 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3614 be32_to_cpu(fw_data[i]));
3616 total_len -= be32_to_cpu(fw_hdr->len);
3618 /* Advance to next fragment */
3619 fw_hdr = (struct tg3_firmware_hdr *)
3620 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3621 } while (total_len > 0);
3629 /* tp->lock is held. */
3630 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3633 const int iters = 5;
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32_f(cpu_base + CPU_PC, pc);
3638 for (i = 0; i < iters; i++) {
3639 if (tr32(cpu_base + CPU_PC) == pc)
3641 tw32(cpu_base + CPU_STATE, 0xffffffff);
3642 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3643 tw32_f(cpu_base + CPU_PC, pc);
3647 return (i == iters) ? -EBUSY : 0;
3650 /* tp->lock is held. */
3651 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3653 const struct tg3_firmware_hdr *fw_hdr;
3656 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3658 /* Firmware blob starts with version numbers, followed by
3659 start address and length. We are setting complete length.
3660 length = end_address_of_bss - start_address_of_text.
3661 Remainder is the blob to be loaded contiguously
3662 from start address. */
3664 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3665 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3670 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3671 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3676 /* Now startup only the RX cpu. */
3677 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3678 be32_to_cpu(fw_hdr->base_addr));
3680 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3681 "should be %08x\n", __func__,
3682 tr32(RX_CPU_BASE + CPU_PC),
3683 be32_to_cpu(fw_hdr->base_addr));
3687 tg3_rxcpu_resume(tp);
3692 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3694 const int iters = 1000;
3698 /* Wait for boot code to complete initialization and enter service
3699 * loop. It is then safe to download service patches
3701 for (i = 0; i < iters; i++) {
3702 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3709 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3713 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3715 netdev_warn(tp->dev,
3716 "Other patches exist. Not downloading EEE patch\n");
3723 /* tp->lock is held. */
3724 static void tg3_load_57766_firmware(struct tg3 *tp)
3726 struct tg3_firmware_hdr *fw_hdr;
3728 if (!tg3_flag(tp, NO_NVRAM))
3731 if (tg3_validate_rxcpu_state(tp))
3737 /* This firmware blob has a different format than older firmware
3738 * releases as given below. The main difference is we have fragmented
3739 * data to be written to non-contiguous locations.
3741 * In the beginning we have a firmware header identical to other
3742 * firmware which consists of version, base addr and length. The length
3743 * here is unused and set to 0xffffffff.
3745 * This is followed by a series of firmware fragments which are
3746 * individually identical to previous firmware. i.e. they have the
3747 * firmware header and followed by data for that fragment. The version
3748 * field of the individual fragment header is unused.
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3755 if (tg3_rxcpu_pause(tp))
3758 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3759 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3761 tg3_rxcpu_resume(tp);
3764 /* tp->lock is held. */
3765 static int tg3_load_tso_firmware(struct tg3 *tp)
3767 const struct tg3_firmware_hdr *fw_hdr;
3768 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3771 if (!tg3_flag(tp, FW_TSO))
3774 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3776 /* Firmware blob starts with version numbers, followed by
3777 start address and length. We are setting complete length.
3778 length = end_address_of_bss - start_address_of_text.
3779 Remainder is the blob to be loaded contiguously
3780 from start address. */
3782 cpu_scratch_size = tp->fw_len;
3784 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3785 cpu_base = RX_CPU_BASE;
3786 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3788 cpu_base = TX_CPU_BASE;
3789 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3790 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3793 err = tg3_load_firmware_cpu(tp, cpu_base,
3794 cpu_scratch_base, cpu_scratch_size,
3799 /* Now startup the cpu. */
3800 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3801 be32_to_cpu(fw_hdr->base_addr));
3804 "%s fails to set CPU PC, is %08x should be %08x\n",
3805 __func__, tr32(cpu_base + CPU_PC),
3806 be32_to_cpu(fw_hdr->base_addr));
3810 tg3_resume_cpu(tp, cpu_base);
3815 /* tp->lock is held. */
3816 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3818 u32 addr_high, addr_low;
3821 addr_high = ((tp->dev->dev_addr[0] << 8) |
3822 tp->dev->dev_addr[1]);
3823 addr_low = ((tp->dev->dev_addr[2] << 24) |
3824 (tp->dev->dev_addr[3] << 16) |
3825 (tp->dev->dev_addr[4] << 8) |
3826 (tp->dev->dev_addr[5] << 0));
3827 for (i = 0; i < 4; i++) {
3828 if (i == 1 && skip_mac_1)
3830 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3831 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3834 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3835 tg3_asic_rev(tp) == ASIC_REV_5704) {
3836 for (i = 0; i < 12; i++) {
3837 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3838 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3842 addr_high = (tp->dev->dev_addr[0] +
3843 tp->dev->dev_addr[1] +
3844 tp->dev->dev_addr[2] +
3845 tp->dev->dev_addr[3] +
3846 tp->dev->dev_addr[4] +
3847 tp->dev->dev_addr[5]) &
3848 TX_BACKOFF_SEED_MASK;
3849 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3852 static void tg3_enable_register_access(struct tg3 *tp)
3855 * Make sure register accesses (indirect or otherwise) will function
3858 pci_write_config_dword(tp->pdev,
3859 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3862 static int tg3_power_up(struct tg3 *tp)
3866 tg3_enable_register_access(tp);
3868 err = pci_set_power_state(tp->pdev, PCI_D0);
3870 /* Switch out of Vaux if it is a NIC */
3871 tg3_pwrsrc_switch_to_vmain(tp);
3873 netdev_err(tp->dev, "Transition to D0 failed\n");
3879 static int tg3_setup_phy(struct tg3 *, int);
3881 static int tg3_power_down_prepare(struct tg3 *tp)
3884 bool device_should_wake, do_low_power;
3886 tg3_enable_register_access(tp);
3888 /* Restore the CLKREQ setting. */
3889 if (tg3_flag(tp, CLKREQ_BUG))
3890 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3891 PCI_EXP_LNKCTL_CLKREQ_EN);
3893 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3894 tw32(TG3PCI_MISC_HOST_CTRL,
3895 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3897 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3898 tg3_flag(tp, WOL_ENABLE);
3900 if (tg3_flag(tp, USE_PHYLIB)) {
3901 do_low_power = false;
3902 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3903 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3904 struct phy_device *phydev;
3905 u32 phyid, advertising;
3907 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3909 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3911 tp->link_config.speed = phydev->speed;
3912 tp->link_config.duplex = phydev->duplex;
3913 tp->link_config.autoneg = phydev->autoneg;
3914 tp->link_config.advertising = phydev->advertising;
3916 advertising = ADVERTISED_TP |
3918 ADVERTISED_Autoneg |
3919 ADVERTISED_10baseT_Half;
3921 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3922 if (tg3_flag(tp, WOL_SPEED_100MB))
3924 ADVERTISED_100baseT_Half |
3925 ADVERTISED_100baseT_Full |
3926 ADVERTISED_10baseT_Full;
3928 advertising |= ADVERTISED_10baseT_Full;
3931 phydev->advertising = advertising;
3933 phy_start_aneg(phydev);
3935 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3936 if (phyid != PHY_ID_BCMAC131) {
3937 phyid &= PHY_BCM_OUI_MASK;
3938 if (phyid == PHY_BCM_OUI_1 ||
3939 phyid == PHY_BCM_OUI_2 ||
3940 phyid == PHY_BCM_OUI_3)
3941 do_low_power = true;
3945 do_low_power = true;
3947 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3948 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3950 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3951 tg3_setup_phy(tp, 0);
3954 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3957 val = tr32(GRC_VCPU_EXT_CTRL);
3958 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3959 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3963 for (i = 0; i < 200; i++) {
3964 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3965 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3970 if (tg3_flag(tp, WOL_CAP))
3971 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3972 WOL_DRV_STATE_SHUTDOWN |
3976 if (device_should_wake) {
3979 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3981 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3982 tg3_phy_auxctl_write(tp,
3983 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3984 MII_TG3_AUXCTL_PCTL_WOL_EN |
3985 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3986 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3990 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3991 mac_mode = MAC_MODE_PORT_MODE_GMII;
3993 mac_mode = MAC_MODE_PORT_MODE_MII;
3995 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3996 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3997 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3998 SPEED_100 : SPEED_10;
3999 if (tg3_5700_link_polarity(tp, speed))
4000 mac_mode |= MAC_MODE_LINK_POLARITY;
4002 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4005 mac_mode = MAC_MODE_PORT_MODE_TBI;
4008 if (!tg3_flag(tp, 5750_PLUS))
4009 tw32(MAC_LED_CTRL, tp->led_ctrl);
4011 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4012 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4013 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4014 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4016 if (tg3_flag(tp, ENABLE_APE))
4017 mac_mode |= MAC_MODE_APE_TX_EN |
4018 MAC_MODE_APE_RX_EN |
4019 MAC_MODE_TDE_ENABLE;
4021 tw32_f(MAC_MODE, mac_mode);
4024 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4028 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4029 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4030 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4033 base_val = tp->pci_clock_ctrl;
4034 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4035 CLOCK_CTRL_TXCLK_DISABLE);
4037 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4038 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4039 } else if (tg3_flag(tp, 5780_CLASS) ||
4040 tg3_flag(tp, CPMU_PRESENT) ||
4041 tg3_asic_rev(tp) == ASIC_REV_5906) {
4043 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4044 u32 newbits1, newbits2;
4046 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4047 tg3_asic_rev(tp) == ASIC_REV_5701) {
4048 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4049 CLOCK_CTRL_TXCLK_DISABLE |
4051 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4052 } else if (tg3_flag(tp, 5705_PLUS)) {
4053 newbits1 = CLOCK_CTRL_625_CORE;
4054 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4056 newbits1 = CLOCK_CTRL_ALTCLK;
4057 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4060 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4063 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4066 if (!tg3_flag(tp, 5705_PLUS)) {
4069 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4070 tg3_asic_rev(tp) == ASIC_REV_5701) {
4071 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4072 CLOCK_CTRL_TXCLK_DISABLE |
4073 CLOCK_CTRL_44MHZ_CORE);
4075 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4078 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4079 tp->pci_clock_ctrl | newbits3, 40);
4083 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4084 tg3_power_down_phy(tp, do_low_power);
4086 tg3_frob_aux_power(tp, true);
4088 /* Workaround for unstable PLL clock */
4089 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4090 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4091 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4092 u32 val = tr32(0x7d00);
4094 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4096 if (!tg3_flag(tp, ENABLE_ASF)) {
4099 err = tg3_nvram_lock(tp);
4100 tg3_halt_cpu(tp, RX_CPU_BASE);
4102 tg3_nvram_unlock(tp);
4106 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Final power-down: quiesce the chip, arm PCI wake if WOL is enabled,
 * then drop the device into D3hot.
 * NOTE(review): this extract elides some original lines (braces/blanks);
 * all visible tokens are preserved verbatim.
 */
4111 static void tg3_power_down(struct tg3 *tp)
/* Program WOL state / shut down MACs and PHY first. */
4113 	tg3_power_down_prepare(tp);
/* Enable PME from D3 only when the WOL_ENABLE flag is set. */
4115 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4116 	pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY AUX_STAT speed/duplex field into *speed / *duplex.
 * Falls back to the FET-style bits for FET PHYs, otherwise reports
 * SPEED_UNKNOWN/DUPLEX_UNKNOWN.
 * NOTE(review): extraction dropped some lines here (the *speed
 * assignments for the 10/100 cases and the break statements); tokens
 * below are preserved verbatim.
 */
4119 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4121 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4122 	case MII_TG3_AUX_STAT_10HALF:
4124 		*duplex = DUPLEX_HALF;
4127 	case MII_TG3_AUX_STAT_10FULL:
4129 		*duplex = DUPLEX_FULL;
4132 	case MII_TG3_AUX_STAT_100HALF:
4134 		*duplex = DUPLEX_HALF;
4137 	case MII_TG3_AUX_STAT_100FULL:
4139 		*duplex = DUPLEX_FULL;
4142 	case MII_TG3_AUX_STAT_1000HALF:
4143 		*speed = SPEED_1000;
4144 		*duplex = DUPLEX_HALF;
4147 	case MII_TG3_AUX_STAT_1000FULL:
4148 		*speed = SPEED_1000;
4149 		*duplex = DUPLEX_FULL;
/* Default case: FET PHYs encode speed/duplex in dedicated status bits. */
4153 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4154 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4156 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
/* Non-FET PHY with an unrecognized AUX_STAT value: report unknown. */
4160 		*speed = SPEED_UNKNOWN;
4161 		*duplex = DUPLEX_UNKNOWN;
/* Program the PHY autoneg advertisement registers (MII_ADVERTISE,
 * MII_CTRL1000) from ethtool-style advertise/flowctrl masks, then set up
 * EEE advertisement when the PHY supports it.
 * Returns 0 or a tg3_writephy/auxctl error code (return paths elided in
 * this extract).
 * NOTE(review): extraction elides some lines (braces, early returns);
 * tokens preserved verbatim.
 */
4166 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
/* Base advertisement: CSMA + requested 10/100 abilities + pause bits. */
4171 	new_adv = ADVERTISE_CSMA;
4172 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4173 	new_adv |= mii_advertise_flowctrl(flowctrl);
4175 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Gigabit advertisement only when the PHY is not 10/100-only. */
4179 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4180 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
/* 5701 A0/B0 erratum: force master mode during 1000T negotiation. */
4182 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4183 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4184 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4186 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
/* Everything below is EEE-capable PHYs only. */
4191 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
/* Disable LPI while (re)configuring EEE advertisement. */
4194 	tw32(TG3_CPMU_EEE_MODE,
4195 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4197 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4202 		/* Advertise 100-BaseTX EEE ability */
4203 		if (advertise & ADVERTISED_100baseT_Full)
4204 			val |= MDIO_AN_EEE_ADV_100TX;
4205 		/* Advertise 1000-BaseT EEE ability */
4206 		if (advertise & ADVERTISED_1000baseT_Full)
4207 			val |= MDIO_AN_EEE_ADV_1000T;
/* EEE advertisement lives in clause-45 AN MMD space. */
4208 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
/* Chip-specific DSP fixups after changing the EEE advertisement. */
4212 		switch (tg3_asic_rev(tp)) {
4214 		case ASIC_REV_57765:
4215 		case ASIC_REV_57766:
4217 			/* If we advertised any eee advertisements above... */
4219 				val = MII_TG3_DSP_TAP26_ALNOKO |
4220 				      MII_TG3_DSP_TAP26_RMRXSTO |
4221 				      MII_TG3_DSP_TAP26_OPCSINPT;
4222 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4226 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4227 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4228 						 MII_TG3_DSP_CH34TP2_HIBW01);
/* Re-lock the SMDSP regardless of earlier failures. */
4231 	err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Kick off copper link bring-up: either (re)start autonegotiation with
 * the configured (or low-power-restricted) advertisement, or force the
 * configured speed/duplex via MII_BMCR.
 * NOTE(review): extraction elides some lines (braces, default cases,
 * delays); tokens preserved verbatim.
 */
4240 static void tg3_phy_copper_begin(struct tg3 *tp)
4242 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4243 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
/* In low-power (WOL) mode only advertise 10Mb, plus 100Mb when the
 * WOL_SPEED_100MB flag allows it.
 */
4246 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4247 			adv = ADVERTISED_10baseT_Half |
4248 			      ADVERTISED_10baseT_Full;
4249 			if (tg3_flag(tp, WOL_SPEED_100MB))
4250 				adv |= ADVERTISED_100baseT_Half |
4251 				       ADVERTISED_100baseT_Full;
4253 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Normal path: use the user-configured advertisement, masking off
 * gigabit on 10/100-only PHYs.
 */
4255 			adv = tp->link_config.advertising;
4256 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4257 				adv &= ~(ADVERTISED_1000baseT_Half |
4258 					 ADVERTISED_1000baseT_Full);
4260 			fc = tp->link_config.flowctrl;
4263 		tg3_phy_autoneg_cfg(tp, adv, fc);
4265 		tg3_writephy(tp, MII_BMCR,
4266 			     BMCR_ANENABLE | BMCR_ANRESTART);
/* Forced-mode path (autoneg disabled, not low power). */
4269 		u32 bmcr, orig_bmcr;
4271 		tp->link_config.active_speed = tp->link_config.speed;
4272 		tp->link_config.active_duplex = tp->link_config.duplex;
4274 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4275 			/* With autoneg disabled, 5715 only links up when the
4276 			 * advertisement register has the configured speed
/* (comment continues in elided lines) — write full advertisement. */
4279 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
/* Build a forced BMCR value from the configured speed/duplex. */
4283 		switch (tp->link_config.speed) {
4289 			bmcr |= BMCR_SPEED100;
4293 			bmcr |= BMCR_SPEED1000;
4297 		if (tp->link_config.duplex == DUPLEX_FULL)
4298 			bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR if it actually changes; bounce through loopback
 * and wait for link to drop before applying the new forced mode.
 */
4300 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4301 		    (bmcr != orig_bmcr)) {
4302 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4303 			for (i = 0; i < 1500; i++) {
4307 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4308 				    tg3_readphy(tp, MII_BMSR, &tmp))
4310 				if (!(tmp & BMSR_LSTATUS)) {
4315 			tg3_writephy(tp, MII_BMCR, bmcr);
/* BCM5401 PHY DSP initialization: disable tap power management, set the
 * extended packet length bit, and load the magic DSP coefficient values
 * required for stable operation. The DSP register/value pairs come from
 * Broadcom and have no public symbolic names.
 * Returns accumulated error status from the PHY writes (return path
 * elided in this extract).
 */
4321 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4325 	/* Turn off tap power management. */
4326 	/* Set Extended packet length bit */
4327 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4329 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4330 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4331 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4332 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4333 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Verify that the PHY's programmed advertisement registers still match
 * tp->link_config. Returns false (forcing a re-config) on mismatch or
 * PHY read failure; stores the local MII_ADVERTISE value via *lcladv.
 * NOTE(review): extraction elides the early-return and final
 * return-true lines; tokens preserved verbatim.
 */
4340 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4342 	u32 advmsk, tgtadv, advertising;
4344 	advertising = tp->link_config.advertising;
4345 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4347 	advmsk = ADVERTISE_ALL;
/* Pause bits only matter (and are compared) in full duplex. */
4348 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4349 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4350 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4353 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4356 	if ((*lcladv & advmsk) != tgtadv)
/* Gigabit check: compare MII_CTRL1000 against the target, honoring the
 * 5701 A0/B0 forced-master workaround bits.
 */
4359 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4362 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4364 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4368 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4369 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4370 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4371 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4372 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4374 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4377 		if (tg3_ctrl != tgtadv)
/* Read the link partner's abilities (MII_STAT1000 for gigabit-capable
 * PHYs plus MII_LPA), convert to an ethtool advertisement mask, and
 * cache it in tp->link_config.rmt_adv; *rmtadv receives the raw LPA
 * value. Returns false on PHY read failure (elided in this extract).
 */
4384 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4388 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4391 		if (tg3_readphy(tp, MII_STAT1000, &val))
4394 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4397 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4400 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4401 	tp->link_config.rmt_adv = lpeth;
/* If the computed link state differs from the cached one, toggle the
 * netif carrier, clear the parallel-detect flag on MII-serdes link
 * loss, and print the link report. Return value indicates whether a
 * change occurred (return lines elided in this extract).
 */
4406 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4408 	if (curr_link_up != tp->link_up) {
4410 			netif_carrier_on(tp->dev);
4412 			netif_carrier_off(tp->dev);
4413 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4414 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4417 		tg3_link_report(tp);
/* Main copper-PHY link setup/poll routine: clears stale MAC status,
 * applies per-chip PHY workarounds, reads BMSR/AUX_STAT to determine
 * the current link state, validates or restarts autonegotiation, then
 * programs MAC_MODE / LED / flow control / PCIe CLKREQ to match and
 * reports any link change. Order of the register accesses is
 * hardware-mandated; do not reorder.
 * NOTE(review): extraction elides many lines (braces, else arms,
 * udelays, resets, returns); tokens preserved verbatim.
 */
4424 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4426 	int current_link_up;
4428 	u32 lcl_adv, rmt_adv;
/* Ack/clear latched MAC status change bits before probing the PHY. */
4436 	     (MAC_STATUS_SYNC_CHANGED |
4437 	      MAC_STATUS_CFG_CHANGED |
4438 	      MAC_STATUS_MI_COMPLETION |
4439 	      MAC_STATUS_LNKSTATE_CHANGED));
/* MI auto-polling must be off while we do manual MDIO accesses. */
4442 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4444 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4448 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4450 	/* Some third-party PHYs need to be reset on link going
/* (comment continues in elided lines) */
4453 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4454 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4455 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
/* BMSR is latched-low; read twice to get the current state. */
4457 		tg3_readphy(tp, MII_BMSR, &bmsr);
4458 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4459 		    !(bmsr & BMSR_LSTATUS))
/* BCM5401-specific DSP init / re-init when the link is down. */
4465 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4466 		tg3_readphy(tp, MII_BMSR, &bmsr);
4467 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4468 		    !tg3_flag(tp, INIT_COMPLETE))
4471 		if (!(bmsr & BMSR_LSTATUS)) {
4472 			err = tg3_init_5401phy_dsp(tp);
4476 			tg3_readphy(tp, MII_BMSR, &bmsr);
4477 			for (i = 0; i < 1000; i++) {
4479 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4480 				    (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 at gigabit sometimes needs a full PHY reset + DSP reload. */
4486 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4487 			    TG3_PHY_REV_BCM5401_B0 &&
4488 			    !(bmsr & BMSR_LSTATUS) &&
4489 			    tp->link_config.active_speed == SPEED_1000) {
4490 				err = tg3_phy_reset(tp);
4492 					err = tg3_init_5401phy_dsp(tp);
4497 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4498 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4499 		/* 5701 {A0,B0} CRC bug workaround */
4500 		tg3_writephy(tp, 0x15, 0x0a75);
4501 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4502 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4503 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4506 	/* Clear pending interrupts... */
4507 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4508 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
/* Interrupt mask: link-change only when using MI interrupts, otherwise
 * mask everything on non-FET PHYs.
 */
4510 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4511 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4512 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4513 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4515 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4516 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4517 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4518 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4519 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4521 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Start from "no link" and rediscover everything below. */
4524 	current_link_up = 0;
4525 	current_speed = SPEED_UNKNOWN;
4526 	current_duplex = DUPLEX_UNKNOWN;
4527 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4528 	tp->link_config.rmt_adv = 0;
/* Capacitive-coupling workaround: force a MISCTEST bit when clear. */
4530 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4531 		err = tg3_phy_auxctl_read(tp,
4532 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4534 		if (!err && !(val & (1 << 10))) {
4535 			tg3_phy_auxctl_write(tp,
4536 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll BMSR (double-read, latched-low) for link, up to 100 tries. */
4543 	for (i = 0; i < 100; i++) {
4544 		tg3_readphy(tp, MII_BMSR, &bmsr);
4545 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4546 		    (bmsr & BMSR_LSTATUS))
4551 	if (bmsr & BMSR_LSTATUS) {
/* Link is up: wait for AUX_STAT to stabilize, then decode it. */
4554 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4555 		for (i = 0; i < 2000; i++) {
4557 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4562 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to settle (non-zero, not the bogus 0x7fff). */
4567 		for (i = 0; i < 200; i++) {
4568 			tg3_readphy(tp, MII_BMCR, &bmcr);
4569 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4571 			if (bmcr && bmcr != 0x7fff)
4579 		tp->link_config.active_speed = current_speed;
4580 		tp->link_config.active_duplex = current_duplex;
/* Link is only "up" if the negotiated/forced mode matches config. */
4582 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4583 			if ((bmcr & BMCR_ANENABLE) &&
4584 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4585 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4586 				current_link_up = 1;
4588 			if (!(bmcr & BMCR_ANENABLE) &&
4589 			    tp->link_config.speed == current_speed &&
4590 			    tp->link_config.duplex == current_duplex &&
4591 			    tp->link_config.flowctrl ==
4592 			    tp->link_config.active_flowctrl) {
4593 				current_link_up = 1;
/* Record MDI-X status and apply negotiated flow control. */
4597 		if (current_link_up == 1 &&
4598 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4601 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4602 				reg = MII_TG3_FET_GEN_STAT;
4603 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4605 				reg = MII_TG3_EXT_STAT;
4606 				bit = MII_TG3_EXT_STAT_MDIX;
4609 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4610 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4612 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* No usable link (or leaving low power): restart negotiation. */
4617 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4618 		tg3_phy_copper_begin(tp);
/* SSB/roboswitch boards report link via the switch, not the PHY. */
4620 		if (tg3_flag(tp, ROBOSWITCH)) {
4621 			current_link_up = 1;
4622 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4623 			current_speed = SPEED_1000;
4624 			current_duplex = DUPLEX_FULL;
4625 			tp->link_config.active_speed = current_speed;
4626 			tp->link_config.active_duplex = current_duplex;
4629 		tg3_readphy(tp, MII_BMSR, &bmsr);
4630 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4631 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4632 			current_link_up = 1;
/* Program MAC port mode (MII for 10/100, GMII otherwise). */
4635 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4636 	if (current_link_up == 1) {
4637 		if (tp->link_config.active_speed == SPEED_100 ||
4638 		    tp->link_config.active_speed == SPEED_10)
4639 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4641 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4642 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4643 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4645 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4647 	/* In order for the 5750 core in BCM4785 chip to work properly
4648 	 * in RGMII mode, the Led Control Register must be set up.
4650 	if (tg3_flag(tp, RGMII_MODE)) {
4651 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4652 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4654 		if (tp->link_config.active_speed == SPEED_10)
4655 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4656 		else if (tp->link_config.active_speed == SPEED_100)
4657 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4658 				     LED_CTRL_100MBPS_ON);
4659 		else if (tp->link_config.active_speed == SPEED_1000)
4660 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4661 				     LED_CTRL_1000MBPS_ON);
4663 		tw32(MAC_LED_CTRL, led_ctrl);
4667 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4668 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4669 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
/* 5700 link-polarity quirk is speed-dependent. */
4671 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4672 		if (current_link_up == 1 &&
4673 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4674 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4676 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4679 	/* ??? Without this setting Netgear GA302T PHY does not
4680 	 * ??? send/receive packets...
4682 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4683 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4684 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4685 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4689 	tw32_f(MAC_MODE, tp->mac_mode);
4692 	tg3_phy_eee_adjust(tp, current_link_up);
4694 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4695 		/* Polled via timer. */
4696 		tw32_f(MAC_EVENT, 0);
4698 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 @ 1000Mb on PCI-X needs a firmware-mbox handshake. */
4702 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4703 	    current_link_up == 1 &&
4704 	    tp->link_config.active_speed == SPEED_1000 &&
4705 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4708 		     (MAC_STATUS_SYNC_CHANGED |
4709 		      MAC_STATUS_CFG_CHANGED));
4712 			      NIC_SRAM_FIRMWARE_MBOX,
4713 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4716 	/* Prevent send BD corruption. */
4717 	if (tg3_flag(tp, CLKREQ_BUG)) {
4718 		if (tp->link_config.active_speed == SPEED_100 ||
4719 		    tp->link_config.active_speed == SPEED_10)
4720 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4721 						   PCI_EXP_LNKCTL_CLKREQ_EN);
4723 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4724 						 PCI_EXP_LNKCTL_CLKREQ_EN);
4727 	tg3_test_and_report_link_chg(tp, current_link_up);
/* State tracked by the software fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine). The ANEG_STATE_* values enumerate the
 * 802.3z-style arbitration states; MR_* are management-register style
 * flag bits mirroring link-partner abilities; ANEG_CFG_* decode the
 * received/transmitted /C/ config code words.
 * NOTE(review): extraction elides some field declarations and blank
 * lines; tokens preserved verbatim.
 */
4732 struct tg3_fiber_aneginfo {
4734 #define ANEG_STATE_UNKNOWN		0
4735 #define ANEG_STATE_AN_ENABLE		1
4736 #define ANEG_STATE_RESTART_INIT		2
4737 #define ANEG_STATE_RESTART		3
4738 #define ANEG_STATE_DISABLE_LINK_OK	4
4739 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4740 #define ANEG_STATE_ABILITY_DETECT	6
4741 #define ANEG_STATE_ACK_DETECT_INIT	7
4742 #define ANEG_STATE_ACK_DETECT		8
4743 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4744 #define ANEG_STATE_COMPLETE_ACK	10
4745 #define ANEG_STATE_IDLE_DETECT_INIT	11
4746 #define ANEG_STATE_IDLE_DETECT	12
4747 #define ANEG_STATE_LINK_OK		13
4748 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4749 #define ANEG_STATE_NEXT_PAGE_WAIT	15
/* Management/ability flag bits (stored in ->flags). */
4752 #define MR_AN_ENABLE		0x00000001
4753 #define MR_RESTART_AN		0x00000002
4754 #define MR_AN_COMPLETE		0x00000004
4755 #define MR_PAGE_RX		0x00000008
4756 #define MR_NP_LOADED		0x00000010
4757 #define MR_TOGGLE_TX		0x00000020
4758 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4759 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4760 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4761 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4762 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4763 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4764 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4765 #define MR_TOGGLE_RX		0x00002000
4766 #define MR_NP_RX		0x00004000
4768 #define MR_LINK_OK		0x80000000
/* Timestamps (in smachine ticks) used for settle-time measurement. */
4770 	unsigned long link_time, cur_time;
/* Last received config word + how long it has been stable. */
4772 	u32 ability_match_cfg;
4773 	int ability_match_count;
4775 	char ability_match, idle_match, ack_match;
/* Raw transmitted/received config code words. */
4777 	u32 txconfig, rxconfig;
4778 #define ANEG_CFG_NP		0x00000080
4779 #define ANEG_CFG_ACK		0x00000040
4780 #define ANEG_CFG_RF2		0x00000020
4781 #define ANEG_CFG_RF1		0x00000010
4782 #define ANEG_CFG_PS2		0x00000001
4783 #define ANEG_CFG_PS1		0x00008000
4784 #define ANEG_CFG_HD		0x00004000
4785 #define ANEG_CFG_FD		0x00002000
4786 #define ANEG_CFG_INVAL		0x00001f06
/* Return codes from tg3_fiber_aneg_smachine. */
4791 #define ANEG_TIMER_ENAB	2
4792 #define ANEG_FAILED	-1
4794 #define ANEG_STATE_SETTLE_TIME	10000
/* One tick of the software 1000BASE-X autonegotiation state machine.
 * Samples the received config word from MAC_RX_AUTO_NEG, updates the
 * ability/ack/idle match tracking in *ap, then advances ap->state and
 * drives MAC_TX_AUTO_NEG / MAC_MODE accordingly. Returns ANEG_DONE,
 * ANEG_TIMER_ENAB (caller should keep ticking), or ANEG_FAILED
 * (some return statements elided in this extract). Called repeatedly
 * from fiber_autoneg().
 * NOTE(review): extraction elides braces, breaks and udelays; tokens
 * preserved verbatim.
 */
4796 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4797 				   struct tg3_fiber_aneginfo *ap)
4800 	unsigned long delta;
/* First tick: reset all match tracking. */
4804 	if (ap->state == ANEG_STATE_UNKNOWN) {
4808 		ap->ability_match_cfg = 0;
4809 		ap->ability_match_count = 0;
4810 		ap->ability_match = 0;
/* Sample the incoming config word; a config value is "matched" once it
 * has been seen unchanged more than once.
 */
4816 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4817 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4819 		if (rx_cfg_reg != ap->ability_match_cfg) {
4820 			ap->ability_match_cfg = rx_cfg_reg;
4821 			ap->ability_match = 0;
4822 			ap->ability_match_count = 0;
4824 			if (++ap->ability_match_count > 1) {
4825 				ap->ability_match = 1;
4826 				ap->ability_match_cfg = rx_cfg_reg;
4829 		if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config received: clear all match state. */
4837 		ap->ability_match_cfg = 0;
4838 		ap->ability_match_count = 0;
4839 		ap->ability_match = 0;
4845 	ap->rxconfig = rx_cfg_reg;
/* Main state dispatch. */
4848 	switch (ap->state) {
4849 	case ANEG_STATE_UNKNOWN:
4850 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4851 			ap->state = ANEG_STATE_AN_ENABLE;
4854 	case ANEG_STATE_AN_ENABLE:
4855 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4856 		if (ap->flags & MR_AN_ENABLE) {
4859 			ap->ability_match_cfg = 0;
4860 			ap->ability_match_count = 0;
4861 			ap->ability_match = 0;
4865 			ap->state = ANEG_STATE_RESTART_INIT;
4867 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
/* Restart: send an all-zero config word and wait the settle time. */
4871 	case ANEG_STATE_RESTART_INIT:
4872 		ap->link_time = ap->cur_time;
4873 		ap->flags &= ~(MR_NP_LOADED);
4875 		tw32(MAC_TX_AUTO_NEG, 0);
4876 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4877 		tw32_f(MAC_MODE, tp->mac_mode);
4880 		ret = ANEG_TIMER_ENAB;
4881 		ap->state = ANEG_STATE_RESTART;
4884 	case ANEG_STATE_RESTART:
4885 		delta = ap->cur_time - ap->link_time;
4886 		if (delta > ANEG_STATE_SETTLE_TIME)
4887 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4889 			ret = ANEG_TIMER_ENAB;
4892 	case ANEG_STATE_DISABLE_LINK_OK:
/* Ability detect: transmit our abilities (FD + pause bits). */
4896 	case ANEG_STATE_ABILITY_DETECT_INIT:
4897 		ap->flags &= ~(MR_TOGGLE_TX);
4898 		ap->txconfig = ANEG_CFG_FD;
4899 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4900 		if (flowctrl & ADVERTISE_1000XPAUSE)
4901 			ap->txconfig |= ANEG_CFG_PS1;
4902 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4903 			ap->txconfig |= ANEG_CFG_PS2;
4904 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4905 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4906 		tw32_f(MAC_MODE, tp->mac_mode);
4909 		ap->state = ANEG_STATE_ABILITY_DETECT;
4912 	case ANEG_STATE_ABILITY_DETECT:
4913 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4914 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
/* Ack detect: echo partner's config with the ACK bit set. */
4917 	case ANEG_STATE_ACK_DETECT_INIT:
4918 		ap->txconfig |= ANEG_CFG_ACK;
4919 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4920 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4921 		tw32_f(MAC_MODE, tp->mac_mode);
4924 		ap->state = ANEG_STATE_ACK_DETECT;
4927 	case ANEG_STATE_ACK_DETECT:
4928 		if (ap->ack_match != 0) {
4929 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4930 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4931 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4933 				ap->state = ANEG_STATE_AN_ENABLE;
4935 		} else if (ap->ability_match != 0 &&
4936 			   ap->rxconfig == 0) {
4937 			ap->state = ANEG_STATE_AN_ENABLE;
/* Complete-ack: decode the partner's ability bits into MR_* flags. */
4941 	case ANEG_STATE_COMPLETE_ACK_INIT:
4942 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4946 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4947 			       MR_LP_ADV_HALF_DUPLEX |
4948 			       MR_LP_ADV_SYM_PAUSE |
4949 			       MR_LP_ADV_ASYM_PAUSE |
4950 			       MR_LP_ADV_REMOTE_FAULT1 |
4951 			       MR_LP_ADV_REMOTE_FAULT2 |
4952 			       MR_LP_ADV_NEXT_PAGE |
4955 		if (ap->rxconfig & ANEG_CFG_FD)
4956 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4957 		if (ap->rxconfig & ANEG_CFG_HD)
4958 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4959 		if (ap->rxconfig & ANEG_CFG_PS1)
4960 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4961 		if (ap->rxconfig & ANEG_CFG_PS2)
4962 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4963 		if (ap->rxconfig & ANEG_CFG_RF1)
4964 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4965 		if (ap->rxconfig & ANEG_CFG_RF2)
4966 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4967 		if (ap->rxconfig & ANEG_CFG_NP)
4968 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4970 		ap->link_time = ap->cur_time;
4972 		ap->flags ^= (MR_TOGGLE_TX);
4973 		if (ap->rxconfig & 0x0008)
4974 			ap->flags |= MR_TOGGLE_RX;
4975 		if (ap->rxconfig & ANEG_CFG_NP)
4976 			ap->flags |= MR_NP_RX;
4977 		ap->flags |= MR_PAGE_RX;
4979 		ap->state = ANEG_STATE_COMPLETE_ACK;
4980 		ret = ANEG_TIMER_ENAB;
4983 	case ANEG_STATE_COMPLETE_ACK:
4984 		if (ap->ability_match != 0 &&
4985 		    ap->rxconfig == 0) {
4986 			ap->state = ANEG_STATE_AN_ENABLE;
4989 		delta = ap->cur_time - ap->link_time;
4990 		if (delta > ANEG_STATE_SETTLE_TIME) {
4991 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4992 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4994 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4995 				    !(ap->flags & MR_NP_RX)) {
4996 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
/* Idle detect: stop sending configs and wait for idle on the wire. */
5004 	case ANEG_STATE_IDLE_DETECT_INIT:
5005 		ap->link_time = ap->cur_time;
5006 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5007 		tw32_f(MAC_MODE, tp->mac_mode);
5010 		ap->state = ANEG_STATE_IDLE_DETECT;
5011 		ret = ANEG_TIMER_ENAB;
5014 	case ANEG_STATE_IDLE_DETECT:
5015 		if (ap->ability_match != 0 &&
5016 		    ap->rxconfig == 0) {
5017 			ap->state = ANEG_STATE_AN_ENABLE;
5020 		delta = ap->cur_time - ap->link_time;
5021 		if (delta > ANEG_STATE_SETTLE_TIME) {
5022 			/* XXX another gem from the Broadcom driver :( */
5023 			ap->state = ANEG_STATE_LINK_OK;
5027 	case ANEG_STATE_LINK_OK:
5028 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5032 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5033 		/* ??? unimplemented */
5036 	case ANEG_STATE_NEXT_PAGE_WAIT:
5037 		/* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion (bounded
 * at 195000 ticks). On success, *txflags receives the config word we
 * transmitted and *rxflags the MR_* flags describing the partner.
 * Returns non-zero when autoneg completed with link OK and a
 * full-duplex partner (final return elided in this extract).
 */
5048 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5051 	struct tg3_fiber_aneginfo aninfo;
5052 	int status = ANEG_FAILED;
/* Clear TX config word and force GMII port mode for the duration. */
5056 	tw32_f(MAC_TX_AUTO_NEG, 0);
5058 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5059 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5062 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5065 	memset(&aninfo, 0, sizeof(aninfo));
5066 	aninfo.flags |= MR_AN_ENABLE;
5067 	aninfo.state = ANEG_STATE_UNKNOWN;
5068 	aninfo.cur_time = 0;
/* Tick the state machine until done/failed or the tick budget runs out. */
5070 	while (++tick < 195000) {
5071 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5072 		if (status == ANEG_DONE || status == ANEG_FAILED)
/* Stop sending config code words. */
5078 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5079 	tw32_f(MAC_MODE, tp->mac_mode);
5082 	*txflags = aninfo.txconfig;
5083 	*rxflags = aninfo.flags;
5085 	if (status == ANEG_DONE &&
5086 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5087 			     MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SerDes PHY via its vendor-specific register
 * map (raw register numbers; Broadcom has not published names for
 * them). The sequence — reset, PLL lock range, auto-lock/comdet, POR
 * toggle — comes from Broadcom reference code.
 * NOTE(review): extraction elides udelay/brace lines; tokens preserved
 * verbatim.
 */
5093 static void tg3_init_bcm8002(struct tg3 *tp)
5095 	u32 mac_status = tr32(MAC_STATUS);
5098 	/* Reset when initting first time or we have a link. */
5099 	if (tg3_flag(tp, INIT_COMPLETE) &&
5100 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5103 	/* Set PLL lock range. */
5104 	tg3_writephy(tp, 0x16, 0x8007);
5107 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5109 	/* Wait for reset to complete. */
5110 	/* XXX schedule_timeout() ... */
5111 	for (i = 0; i < 500; i++)
5114 	/* Config mode; select PMA/Ch 1 regs. */
5115 	tg3_writephy(tp, 0x10, 0x8411);
5117 	/* Enable auto-lock and comdet, select txclk for tx. */
5118 	tg3_writephy(tp, 0x11, 0x0a10);
5120 	tg3_writephy(tp, 0x18, 0x00a0);
5121 	tg3_writephy(tp, 0x16, 0x41ff);
5123 	/* Assert and deassert POR. */
5124 	tg3_writephy(tp, 0x13, 0x0400);
5126 	tg3_writephy(tp, 0x13, 0x0000);
5128 	tg3_writephy(tp, 0x11, 0x0a50);
5130 	tg3_writephy(tp, 0x11, 0x0a10);
5132 	/* Wait for signal to stabilize */
5133 	/* XXX schedule_timeout() ... */
5134 	for (i = 0; i < 15000; i++)
5137 	/* Deselect the channel register so we can read the PHYID
5140 	tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the SG_DIG hardware autonegotiation engine
 * (5704S-class parts). Handles forced mode, (re)starting HW autoneg
 * with the desired pause bits, completion processing, and the parallel
 * detection fallback. Returns 1 when link is up, 0 otherwise.
 * NOTE(review): extraction elides many lines (braces, serdes_cfg bit
 * ORs, delays, label definitions such as restart_autoneg); tokens
 * preserved verbatim.
 */
5143 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5146 	u32 sg_dig_ctrl, sg_dig_status;
5147 	u32 serdes_cfg, expected_sg_dig_ctrl;
5148 	int workaround, port_a;
5149 	int current_link_up;
5152 	expected_sg_dig_ctrl = 0;
5155 	current_link_up = 0;
/* 5704 A0/A1 need a serdes workaround; detect which MAC port we are. */
5157 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5158 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5160 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5163 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5164 		/* preserve bits 20-23 for voltage regulator */
5165 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5168 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced mode: drop out of HW autoneg; link is up if PCS is synced. */
5170 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5171 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5173 			u32 val = serdes_cfg;
5179 			tw32_f(MAC_SERDES_CFG, val);
5182 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5184 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5185 			tg3_setup_flow_control(tp, 0, 0);
5186 			current_link_up = 1;
/* Autoneg mode: build the SG_DIG_CTRL value we expect to be active. */
5191 	/* Want auto-negotiation. */
5192 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5194 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5195 	if (flowctrl & ADVERTISE_1000XPAUSE)
5196 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5197 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5198 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* Mismatch: either hold the parallel-detect link or restart HW autoneg
 * with a soft reset.
 */
5200 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5201 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5202 		    tp->serdes_counter &&
5203 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5204 				    MAC_STATUS_RCVD_CFG)) ==
5205 		     MAC_STATUS_PCS_SYNCED)) {
5206 			tp->serdes_counter--;
5207 			current_link_up = 1;
5212 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5213 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5215 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5217 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5218 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5219 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5220 				 MAC_STATUS_SIGNAL_DET)) {
5221 		sg_dig_status = tr32(SG_DIG_STATUS);
5222 		mac_status = tr32(MAC_STATUS);
/* Autoneg completed: translate SG_DIG pause bits into flow control. */
5224 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5225 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5226 			u32 local_adv = 0, remote_adv = 0;
5228 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5229 				local_adv |= ADVERTISE_1000XPAUSE;
5230 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5231 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5233 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5234 				remote_adv |= LPA_1000XPAUSE;
5235 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5236 				remote_adv |= LPA_1000XPAUSE_ASYM;
5238 			tp->link_config.rmt_adv =
5239 					   mii_adv_to_ethtool_adv_x(remote_adv);
5241 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5242 			current_link_up = 1;
5243 			tp->serdes_counter = 0;
5244 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Autoneg not complete: count down, then try parallel detection. */
5245 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5246 			if (tp->serdes_counter)
5247 				tp->serdes_counter--;
5250 					u32 val = serdes_cfg;
5257 					tw32_f(MAC_SERDES_CFG, val);
5260 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5263 				/* Link parallel detection - link is up */
5264 				/* only if we have PCS_SYNC and not */
5265 				/* receiving config code words */
5266 				mac_status = tr32(MAC_STATUS);
5267 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5268 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5269 					tg3_setup_flow_control(tp, 0, 0);
5270 					current_link_up = 1;
5272 						TG3_PHYFLG_PARALLEL_DETECT;
5273 					tp->serdes_counter =
5274 						SERDES_PARALLEL_DET_TIMEOUT;
5276 					goto restart_autoneg;
5280 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5281 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5285 	return current_link_up;
/* Fiber link setup without the SG_DIG engine: run the software autoneg
 * state machine (fiber_autoneg) when autoneg is enabled, otherwise
 * force a 1000FD link. Returns 1 when link is up, 0 otherwise.
 * NOTE(review): extraction elides some lines (braces, udelays); tokens
 * preserved verbatim.
 */
5288 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5290 	int current_link_up = 0;
/* No PCS sync means no usable signal at all. */
5292 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5295 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5296 		u32 txflags, rxflags;
5299 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5300 			u32 local_adv = 0, remote_adv = 0;
/* Translate exchanged config words into pause advertisements. */
5302 			if (txflags & ANEG_CFG_PS1)
5303 				local_adv |= ADVERTISE_1000XPAUSE;
5304 			if (txflags & ANEG_CFG_PS2)
5305 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5307 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5308 				remote_adv |= LPA_1000XPAUSE;
5309 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5310 				remote_adv |= LPA_1000XPAUSE_ASYM;
5312 			tp->link_config.rmt_adv =
5313 					   mii_adv_to_ethtool_adv_x(remote_adv);
5315 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5317 			current_link_up = 1;
/* Ack any sync/config change noise left over from negotiation. */
5319 		for (i = 0; i < 30; i++) {
5322 			     (MAC_STATUS_SYNC_CHANGED |
5323 			      MAC_STATUS_CFG_CHANGED));
5325 			if ((tr32(MAC_STATUS) &
5326 			     (MAC_STATUS_SYNC_CHANGED |
5327 			      MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but PCS is synced and no configs arriving: treat the
 * link as up anyway (partner likely has autoneg off).
 */
5331 		mac_status = tr32(MAC_STATUS);
5332 		if (current_link_up == 0 &&
5333 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5334 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5335 			current_link_up = 1;
5337 		tg3_setup_flow_control(tp, 0, 0);
5339 		/* Forcing 1000FD link up. */
5340 		current_link_up = 1;
5342 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5345 		tw32_f(MAC_MODE, tp->mac_mode);
5350 	return current_link_up;
/* Top-level fiber (TBI) link setup: snapshot the current link config,
 * short-circuit when nothing changed, otherwise reconfigure the MAC
 * for TBI mode, run HW or by-hand autoneg, settle the status bits, set
 * the LEDs, and report any link change.
 * NOTE(review): extraction elides some lines (braces, udelays,
 * returns); tokens preserved verbatim.
 */
5353 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5356 	u16 orig_active_speed;
5357 	u8 orig_active_duplex;
5359 	int current_link_up;
/* Remember previous state so we can report a config-only change. */
5362 	orig_pause_cfg = tp->link_config.active_flowctrl;
5363 	orig_active_speed = tp->link_config.active_speed;
5364 	orig_active_duplex = tp->link_config.active_duplex;
/* Fast path (non-HW-autoneg, init complete): if PCS is synced with
 * signal detect and nothing else latched, just ack the change bits.
 */
5366 	if (!tg3_flag(tp, HW_AUTONEG) &&
5368 	    tg3_flag(tp, INIT_COMPLETE)) {
5369 		mac_status = tr32(MAC_STATUS);
5370 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5371 			       MAC_STATUS_SIGNAL_DET |
5372 			       MAC_STATUS_CFG_CHANGED |
5373 			       MAC_STATUS_RCVD_CFG);
5374 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5375 				   MAC_STATUS_SIGNAL_DET)) {
5376 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5377 					    MAC_STATUS_CFG_CHANGED));
/* Reconfigure the MAC for TBI (fiber) port mode. */
5382 	tw32_f(MAC_TX_AUTO_NEG, 0);
5384 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5385 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5386 	tw32_f(MAC_MODE, tp->mac_mode);
5389 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5390 		tg3_init_bcm8002(tp);
5392 	/* Enable link change event even when serdes polling. */
5393 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5396 	current_link_up = 0;
5397 	tp->link_config.rmt_adv = 0;
5398 	mac_status = tr32(MAC_STATUS);
/* Choose the HW SG_DIG engine or the software state machine. */
5400 	if (tg3_flag(tp, HW_AUTONEG))
5401 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5403 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the stale link-change bit in the status block. */
5405 	tp->napi[0].hw_status->status =
5406 		(SD_STATUS_UPDATED |
5407 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack latched sync/config changes until they stop re-asserting. */
5409 	for (i = 0; i < 100; i++) {
5410 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5411 				    MAC_STATUS_CFG_CHANGED));
5413 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5414 					 MAC_STATUS_CFG_CHANGED |
5415 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
/* If PCS sync was lost, re-kick config transmission when allowed. */
5419 	mac_status = tr32(MAC_STATUS);
5420 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5421 		current_link_up = 0;
5422 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5423 		    tp->serdes_counter == 0) {
5424 			tw32_f(MAC_MODE, (tp->mac_mode |
5425 					  MAC_MODE_SEND_CONFIGS));
5427 			tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber link is always 1000FD when up; drive the LEDs to match. */
5431 	if (current_link_up == 1) {
5432 		tp->link_config.active_speed = SPEED_1000;
5433 		tp->link_config.active_duplex = DUPLEX_FULL;
5434 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5435 				    LED_CTRL_LNKLED_OVERRIDE |
5436 				    LED_CTRL_1000MBPS_ON));
5438 		tp->link_config.active_speed = SPEED_UNKNOWN;
5439 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5440 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5441 				    LED_CTRL_LNKLED_OVERRIDE |
5442 				    LED_CTRL_TRAFFIC_OVERRIDE));
/* Even without an up/down transition, report pause/speed changes. */
5445 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5446 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5447 		if (orig_pause_cfg != now_pause_cfg ||
5448 		    orig_active_speed != tp->link_config.active_speed ||
5449 		    orig_active_duplex != tp->link_config.active_duplex)
5450 			tg3_link_report(tp);
/* Link setup for fiber ports driven through an MII-style PHY interface
 * (as opposed to the raw TBI path in tg3_setup_fiber_phy).
 *
 * Reads BMSR/BMCR over MDIO, programs advertisement and restarts autoneg
 * when needed (or forces speed/duplex), resolves duplex from the common
 * local/remote advertisement, then updates MAC mode and link state.
 *
 * Returns the accumulated tg3_readphy()/tg3_writephy() error status via
 * 'err' (return statement is outside the visible lines).
 *
 * NOTE(review): interior lines are missing from this extraction
 * (declarations, else branches, some writes) - see line-number gaps.
 */
5456 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5458 int current_link_up, err = 0;
5462 u32 local_adv, remote_adv;
5464 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5465 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack any pending sync/config/MI/link-state change events. */
5471 (MAC_STATUS_SYNC_CHANGED |
5472 MAC_STATUS_CFG_CHANGED |
5473 MAC_STATUS_MI_COMPLETION |
5474 MAC_STATUS_LNKSTATE_CHANGED));
5480 current_link_up = 0;
5481 current_speed = SPEED_UNKNOWN;
5482 current_duplex = DUPLEX_UNKNOWN;
5483 tp->link_config.rmt_adv = 0;
/* BMSR latches link-down; read twice to get the current state. */
5485 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5486 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* 5714: the PHY's BMSR link bit is unreliable; trust the MAC's
 * TX_STATUS_LINK_UP indication instead.
 */
5487 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5488 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5489 bmsr |= BMSR_LSTATUS;
5491 bmsr &= ~BMSR_LSTATUS;
5494 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5496 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5497 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5498 /* do nothing, just check for link up at the end */
5499 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Rebuild the 1000BASE-X advertisement from flowctrl + advertising
 * settings and restart autoneg only if something changed.
 */
5502 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5503 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5504 ADVERTISE_1000XPAUSE |
5505 ADVERTISE_1000XPSE_ASYM |
5508 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5509 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5511 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5512 tg3_writephy(tp, MII_ADVERTISE, newadv);
5513 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5514 tg3_writephy(tp, MII_BMCR, bmcr);
5516 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5517 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5518 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced-mode path: build a BMCR with autoneg off and the requested
 * duplex; only touch the PHY if it actually changes.
 */
5525 bmcr &= ~BMCR_SPEED1000;
5526 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5528 if (tp->link_config.duplex == DUPLEX_FULL)
5529 new_bmcr |= BMCR_FULLDPLX;
5531 if (new_bmcr != bmcr) {
5532 /* BMCR_SPEED1000 is a reserved bit that needs
5533 * to be set on write.
5535 new_bmcr |= BMCR_SPEED1000;
5537 /* Force a linkdown */
/* Strip the 1000X ability bits and restart autoneg so the link
 * partner sees the link drop before we force the new mode.
 */
5541 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5542 adv &= ~(ADVERTISE_1000XFULL |
5543 ADVERTISE_1000XHALF |
5545 tg3_writephy(tp, MII_ADVERTISE, adv);
5546 tg3_writephy(tp, MII_BMCR, bmcr |
5550 tg3_carrier_off(tp);
5552 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link state after forcing the new mode (double BMSR read
 * again to flush the latched bit; 5714 MAC override as above).
 */
5554 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5555 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5556 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5557 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5558 bmsr |= BMSR_LSTATUS;
5560 bmsr &= ~BMSR_LSTATUS;
5562 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5566 if (bmsr & BMSR_LSTATUS) {
5567 current_speed = SPEED_1000;
5568 current_link_up = 1;
5569 if (bmcr & BMCR_FULLDPLX)
5570 current_duplex = DUPLEX_FULL;
5572 current_duplex = DUPLEX_HALF;
/* With autoneg enabled, resolve duplex from the intersection of our
 * advertisement and the link partner's; if nothing matches and this
 * isn't a 5780-class chip, the "link" was only parallel detect.
 */
5577 if (bmcr & BMCR_ANENABLE) {
5580 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5581 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5582 common = local_adv & remote_adv;
5583 if (common & (ADVERTISE_1000XHALF |
5584 ADVERTISE_1000XFULL)) {
5585 if (common & ADVERTISE_1000XFULL)
5586 current_duplex = DUPLEX_FULL;
5588 current_duplex = DUPLEX_HALF;
5590 tp->link_config.rmt_adv =
5591 mii_adv_to_ethtool_adv_x(remote_adv);
5592 } else if (!tg3_flag(tp, 5780_CLASS)) {
5593 /* Link is up via parallel detect */
5595 current_link_up = 0;
/* Flow control only applies on a full-duplex link. */
5600 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5601 tg3_setup_flow_control(tp, local_adv, remote_adv);
5603 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5604 if (tp->link_config.active_duplex == DUPLEX_HALF)
5605 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5607 tw32_f(MAC_MODE, tp->mac_mode);
5610 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5612 tp->link_config.active_speed = current_speed;
5613 tp->link_config.active_duplex = current_duplex;
5615 tg3_test_and_report_link_chg(tp, current_link_up);
/* Periodic serdes helper: when autoneg times out, fall back to parallel
 * detection (force 1000/full if we see signal but no config words); when
 * a parallel-detected link later starts receiving config words, switch
 * back to autoneg.
 *
 * NOTE(review): interior lines (delays, some closing braces/conditions)
 * are missing from this extraction.
 */
5619 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5621 if (tp->serdes_counter) {
5622 /* Give autoneg time to complete. */
5623 tp->serdes_counter--;
5628 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5631 tg3_readphy(tp, MII_BMCR, &bmcr);
5632 if (bmcr & BMCR_ANENABLE) {
5635 /* Select shadow register 0x1f */
5636 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5637 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5639 /* Select expansion interrupt status register */
5640 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5641 MII_TG3_DSP_EXP1_INT_STAT);
/* Read twice: the first read clears latched status bits. */
5642 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5643 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5645 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5646 /* We have signal detect and not receiving
5647 * config code words, link is up by parallel
/* Force 1000 Mb/s full duplex and disable autoneg. */
5651 bmcr &= ~BMCR_ANENABLE;
5652 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5653 tg3_writephy(tp, MII_BMCR, bmcr);
5654 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5657 } else if (tp->link_up &&
5658 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5659 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5662 /* Select expansion interrupt status register */
5663 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5664 MII_TG3_DSP_EXP1_INT_STAT);
5665 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5669 /* Config code words received, turn on autoneg. */
5670 tg3_readphy(tp, MII_BMCR, &bmcr);
5671 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5673 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Top-level PHY setup dispatcher: routes to the fiber, fiber-MII, or
 * copper setup routine based on phy_flags, then applies post-link-change
 * fixups (5784_AX clock prescaler, TX slot time, stats coalescing and
 * the ASPM power-management threshold workaround).
 *
 * Returns the error code from the selected setup routine.
 */
5679 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5684 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5685 err = tg3_setup_fiber_phy(tp, force_reset);
5686 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5687 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5689 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784_AX: pick a GRC prescaler matching the current MAC clock (the
 * scale assignments sit on lines missing from this extraction).
 */
5691 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5694 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5695 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5697 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5702 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5703 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5704 tw32(GRC_MISC_CFG, val);
/* Rebuild MAC_TX_LENGTHS: base IPG settings, preserving the jumbo
 * frame length / countdown fields on 5720 and 5762.
 */
5707 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5708 (6 << TX_LENGTHS_IPG_SHIFT);
5709 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5710 tg3_asic_rev(tp) == ASIC_REV_5762)
5711 val |= tr32(MAC_TX_LENGTHS) &
5712 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5713 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* Longer slot time (0xff) for 1000/half; normal (32) otherwise. */
5715 if (tp->link_config.active_speed == SPEED_1000 &&
5716 tp->link_config.active_duplex == DUPLEX_HALF)
5717 tw32(MAC_TX_LENGTHS, val |
5718 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))5;
5720 tw32(MAC_TX_LENGTHS, val |
5721 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
/* Pre-5705 chips: statistics block coalescing only while link is up
 * (the link-state condition is on a missing line).
 */
5723 if (!tg3_flag(tp, 5705_PLUS)) {
5725 tw32(HOSTCC_STAT_COAL_TICKS,
5726 tp->coal.stats_block_coalesce_usecs);
5728 tw32(HOSTCC_STAT_COAL_TICKS, 0);
/* ASPM workaround: adjust the PCIe L1 entry threshold around link
 * changes (the value chosen differs by link state; part of the
 * condition is on missing lines).
 */
5732 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5733 val = tr32(PCIE_PWR_MGMT_THRESH);
5735 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5738 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5739 tw32(PCIE_PWR_MGMT_THRESH, val);
5745 /* tp->lock must be held */
/* Read the 64-bit EAV reference clock: LSB first, then MSB shifted into
 * the high word. NOTE(review): reading LSB then MSB assumes the hardware
 * latches a coherent 64-bit snapshot - confirm against the register spec.
 */
5746 static u64 tg3_refclk_read(struct tg3 *tp)
5748 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5749 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5752 /* tp->lock must be held */
/* Load a new 64-bit value into the EAV reference clock: stop the clock,
 * write both halves, then resume (tw32_f flushes the final write).
 */
5753 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5755 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5756 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5757 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5758 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5761 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5762 static inline void tg3_full_unlock(struct tg3 *tp);
/* ethtool .get_ts_info: report software timestamping always, plus
 * hardware timestamping capabilities and the PHC index when a PTP clock
 * was registered (the ptp_clock NULL check sits on a missing line).
 */
5763 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5765 struct tg3 *tp = netdev_priv(dev);
5767 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5768 SOF_TIMESTAMPING_RX_SOFTWARE |
5769 SOF_TIMESTAMPING_SOFTWARE |
5770 SOF_TIMESTAMPING_TX_HARDWARE |
5771 SOF_TIMESTAMPING_RX_HARDWARE |
5772 SOF_TIMESTAMPING_RAW_HARDWARE;
5775 info->phc_index = ptp_clock_index(tp->ptp_clock);
/* -1 tells userspace there is no PHC for this device. */
5777 info->phc_index = -1;
5779 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5781 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5782 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5783 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5784 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* PTP .adjfreq callback: program the hardware frequency-correction
 * register from a parts-per-billion adjustment.
 *
 * @ppb: requested frequency offset in parts per billion (sign handled
 *       via neg_adj; the negation of ppb is on a missing line).
 */
5788 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5790 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5791 bool neg_adj = false;
5799 /* Frequency adjustment is performed using hardware with a 24 bit
5800 * accumulator and a programmable correction value. On each clk, the
5801 * correction value gets added to the accumulator and when it
5802 * overflows, the time counter is incremented/decremented.
5804 * So conversion from ppb to correction value is
5805 * ppb * (1 << 24) / 1000000000
5807 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5808 TG3_EAV_REF_CLK_CORRECT_MASK;
5810 tg3_full_lock(tp, 0);
/* Non-zero correction: enable with sign bit; zero: disable entirely
 * (the if/else around these two writes spans missing lines).
 */
5813 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5814 TG3_EAV_REF_CLK_CORRECT_EN |
5815 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5817 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5819 tg3_full_unlock(tp);
/* PTP .adjtime callback: phase adjustment is kept purely in software by
 * accumulating the delta into tp->ptp_adjust (applied on reads) rather
 * than rewriting the hardware clock.
 */
5824 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5826 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5828 tg3_full_lock(tp, 0);
5829 tp->ptp_adjust += delta;
5830 tg3_full_unlock(tp);
/* PTP .gettime callback: hardware reference clock plus the software
 * phase offset, converted from nanoseconds to a timespec.
 */
5835 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5839 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5841 tg3_full_lock(tp, 0);
5842 ns = tg3_refclk_read(tp);
5843 ns += tp->ptp_adjust;
5844 tg3_full_unlock(tp);
/* div_u64_rem splits ns into whole seconds + nanosecond remainder. */
5846 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5847 ts->tv_nsec = remainder;
/* PTP .settime callback: write the absolute time straight into the
 * hardware clock (tp->ptp_adjust is reset on a line missing from this
 * extraction, since the offset is now baked into the hardware value).
 */
5852 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5853 const struct timespec *ts)
5856 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5858 ns = timespec_to_ns(ts);
5860 tg3_full_lock(tp, 0);
5861 tg3_refclk_write(tp, ns);
5863 tg3_full_unlock(tp);
/* PTP .enable callback for ancillary features; body not visible in this
 * extraction - presumably rejects all requests (TODO confirm).
 */
5868 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5869 struct ptp_clock_request *rq, int on)
/* Capability/ops template copied into tp->ptp_info at init time.
 * max_adj is 250 ppm expressed in ppb; the n_alarm/n_ext_ts/n_per_out/pps
 * fields sit on lines missing from this extraction.
 */
5874 static const struct ptp_clock_info tg3_ptp_caps = {
5875 .owner = THIS_MODULE,
5876 .name = "tg3 clock",
5877 .max_adj = 250000000,
5882 .adjfreq = tg3_ptp_adjfreq,
5883 .adjtime = tg3_ptp_adjtime,
5884 .gettime = tg3_ptp_gettime,
5885 .settime = tg3_ptp_settime,
5886 .enable = tg3_ptp_enable,
/* Convert a raw hardware clock value into skb hardware timestamps:
 * mask to the valid timestamp bits and add the software phase offset
 * (the "+ tp->ptp_adjust" term continues on a missing line).
 */
5889 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5890 struct skb_shared_hwtstamps *timestamp)
5892 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5893 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5897 /* tp->lock must be held */
/* Seed the hardware clock from system real time and install the PTP
 * capability template (ptp_adjust reset is on a missing line).
 */
5898 static void tg3_ptp_init(struct tg3 *tp)
5900 if (!tg3_flag(tp, PTP_CAPABLE))
5903 /* Initialize the hardware clock to the system time. */
5904 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()))
5906 tp->ptp_info = tg3_ptp_caps;
5909 /* tp->lock must be held */
/* After resume, restore the hardware clock to system time plus the
 * accumulated software offset (ptp_adjust reset is on a missing line).
 */
5910 static void tg3_ptp_resume(struct tg3 *tp)
5912 if (!tg3_flag(tp, PTP_CAPABLE))
5915 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
/* Unregister the PTP clock if one was created; NULL the pointer so a
 * second call (or tg3_get_ts_info) sees it as gone.
 */
5919 static void tg3_ptp_fini(struct tg3 *tp)
5921 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5924 ptp_clock_unregister(tp->ptp_clock);
5925 tp->ptp_clock = NULL;
/* Non-zero while interrupts are being synchronized/disabled; poll paths
 * check this before doing work.
 */
5929 static inline int tg3_irq_sync(struct tg3 *tp)
5931 return tp->irq_sync;
/* Bulk register dump helper: copy 'len' bytes of registers starting at
 * 'off' into the dump buffer, placing them at byte offset 'off' so the
 * buffer layout mirrors the register map.
 */
5934 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5938 dst = (u32 *)((u8 *)dst + off);
5939 for (i = 0; i < len; i += sizeof(u32))
5940 *dst++ = tr32(off + i);
/* Dump the legacy (non-PCIe-mapped) register blocks into 'regs', one
 * tg3_rd32_loop() per functional unit (mailboxes, MAC, send/receive
 * data/BD engines, host coalescing, DMA engines, CPUs, GRC, NVRAM).
 * Conditional sections depend on chip capabilities.
 */
5943 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5945 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5946 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5947 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5948 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5949 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5950 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5951 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5952 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5953 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5954 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5955 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5956 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5957 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5958 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5959 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5960 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5961 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5962 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5963 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
/* Per-vector coalescing registers only exist with MSI-X support. */
5965 if (tg3_flag(tp, SUPPORT_MSIX))
5966 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5968 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5969 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5970 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5971 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5972 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5973 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5974 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5975 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* Pre-5705 chips have a separate TX CPU. */
5977 if (!tg3_flag(tp, 5705_PLUS)) {
5978 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5979 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5980 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5983 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5984 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5985 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5986 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5987 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5989 if (tg3_flag(tp, NVRAM))
5990 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Diagnostic dump on error: snapshot the register space (direct reads on
 * PCIe, tg3_dump_legacy_regs otherwise), print non-zero register rows,
 * then print each NAPI vector's hardware status block and software ring
 * indices. GFP_ATOMIC because this can run from interrupt/timer context.
 */
5993 static void tg3_dump_state(struct tg3 *tp)
5998 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6002 if (tg3_flag(tp, PCI_EXPRESS)) {
6003 /* Read up to but not including private PCI registers */
6004 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6005 regs[i / sizeof(u32)] = tr32(i);
6007 tg3_dump_legacy_regs(tp, regs);
/* Print four registers per line, skipping all-zero rows to keep the
 * log readable.
 */
6009 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6010 if (!regs[i + 0] && !regs[i + 1] &&
6011 !regs[i + 2] && !regs[i + 3])
6014 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6016 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6021 for (i = 0; i < tp->irq_cnt; i++) {
6022 struct tg3_napi *tnapi = &tp->napi[i];
6024 /* SW status block */
6026 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6028 tnapi->hw_status->status,
6029 tnapi->hw_status->status_tag,
6030 tnapi->hw_status->rx_jumbo_consumer,
6031 tnapi->hw_status->rx_consumer,
6032 tnapi->hw_status->rx_mini_consumer,
6033 tnapi->hw_status->idx[0].rx_producer,
6034 tnapi->hw_status->idx[0].tx_consumer)
6037 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6039 tnapi->last_tag, tnapi->last_irq_tag,
6040 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6042 tnapi->prodring.rx_std_prod_idx,
6043 tnapi->prodring.rx_std_cons_idx,
6044 tnapi->prodring.rx_jmb_prod_idx,
6045 tnapi->prodring.rx_jmb_cons_idx);
6049 /* This is called whenever we suspect that the system chipset is re-
6050 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6051 * is bogus tx completions. We try to recover by setting the
6052 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6055 static void tg3_tx_recover(struct tg3 *tp)
/* Sanity: this path should be impossible if the reorder workaround is
 * already active or indirect mailbox writes are in use.
 */
6057 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6058 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6060 netdev_warn(tp->dev,
6061 "The system may be re-ordering memory-mapped I/O "
6062 "cycles to the network device, attempting to recover. "
6063 "Please report the problem to the driver maintainer "
6064 "and include system chipset information.\n");
/* Flag the pending recovery under tp->lock; the actual chip reset
 * happens later, outside this context.
 */
6066 spin_lock(&tp->lock);
6067 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6068 spin_unlock(&tp->lock);
/* Number of free TX descriptors on this ring. The subtraction is masked
 * to the ring size so it is correct across index wrap-around. A compiler
 * barrier forcing a fresh read of the indices sits on a line missing
 * from this extraction (see the comment on 6073).
 */
6071 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6073 /* Tell compiler to fetch tx indices from memory. */
6075 return tnapi->tx_pending -
6076 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6079 /* Tigon3 never reports partial packet sends. So we do not
6080 * need special logic to handle SKBs that have not had all
6081 * of their frags sent yet, like SunGEM does.
6083 static void tg3_tx(struct tg3_napi *tnapi)
6085 struct tg3 *tp = tnapi->tp;
6086 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6087 u32 sw_idx = tnapi->tx_cons;
6088 struct netdev_queue *txq;
6089 int index = tnapi - tp->napi;
6090 unsigned int pkts_compl = 0, bytes_compl = 0;
/* With TSS each NAPI vector (minus one) maps to its own TX queue; the
 * index adjustment line is missing from this extraction.
 */
6092 if (tg3_flag(tp, ENABLE_TSS))
6095 txq = netdev_get_tx_queue(tp->dev, index);
/* Walk completed descriptors from our software consumer index up to
 * the hardware's reported consumer index.
 */
6097 while (sw_idx != hw_idx) {
6098 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6099 struct sk_buff *skb = ri->skb;
/* A NULL skb here means a bogus completion - trigger recovery
 * (the tg3_tx_recover call is on a missing line).
 */
6102 if (unlikely(skb == NULL)) {
/* If this descriptor was hardware-timestamped, read the TX timestamp
 * registers and deliver it via skb_tstamp_tx().
 */
6107 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6108 struct skb_shared_hwtstamps timestamp;
6109 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6110 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6112 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6114 skb_tstamp_tx(skb, &timestamp);
/* Unmap the linear part, then each fragment, skipping descriptor
 * slots flagged 'fragmented' (split mappings).
 */
6117 pci_unmap_single(tp->pdev,
6118 dma_unmap_addr(ri, mapping),
6124 while (ri->fragmented) {
6125 ri->fragmented = false;
6126 sw_idx = NEXT_TX(sw_idx);
6127 ri = &tnapi->tx_buffers[sw_idx];
6130 sw_idx = NEXT_TX(sw_idx);
6132 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6133 ri = &tnapi->tx_buffers[sw_idx];
/* Fragment slot occupied or index ran past hw_idx: corrupted
 * completion state (tx_bug set on a missing line).
 */
6134 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6137 pci_unmap_page(tp->pdev,
6138 dma_unmap_addr(ri, mapping),
6139 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6142 while (ri->fragmented) {
6143 ri->fragmented = false;
6144 sw_idx = NEXT_TX(sw_idx);
6145 ri = &tnapi->tx_buffers[sw_idx];
6148 sw_idx = NEXT_TX(sw_idx);
6152 bytes_compl += skb->len;
6156 if (unlikely(tx_bug)) {
6162 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6164 tnapi->tx_cons = sw_idx;
6166 /* Need to make the tx_cons update visible to tg3_start_xmit()
6167 * before checking for netif_queue_stopped(). Without the
6168 * memory barrier, there is a small possibility that tg3_start_xmit()
6169 * will miss it and cause the queue to be stopped forever.
/* Re-check under the tx lock to close the race with a concurrent
 * tg3_start_xmit() stopping the queue.
 */
6173 if (unlikely(netif_tx_queue_stopped(txq) &&
6174 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6175 __netif_tx_lock(txq, smp_processor_id());
6176 if (netif_tx_queue_stopped(txq) &&
6177 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6178 netif_tx_wake_queue(txq);
6179 __netif_tx_unlock(txq);
/* Free an RX data buffer: page-fragment allocations are released via
 * put_page on the head page; the kfree branch for non-fragment buffers
 * is on a line missing from this extraction.
 */
6183 static void tg3_frag_free(bool is_frag, void *data)
6186 put_page(virt_to_head_page(data));
/* Unmap and free one RX ring buffer. skb_size recomputes the original
 * allocation size so we can tell fragment allocations (<= PAGE_SIZE)
 * from kmalloc ones; the NULL-data early return is on a missing line.
 */
6191 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6193 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6194 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6199 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6200 map_sz, PCI_DMA_FROMDEVICE);
6201 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6206 /* Returns size of skb allocated or < 0 on error.
6208 * We only need to fill in the address because the other members
6209 * of the RX descriptor are invariant, see tg3_init_rings.
6211 * Note the purposeful assymetry of cpu vs. chip accesses. For
6212 * posting buffers we only dirty the first cache line of the RX
6213 * descriptor (containing the address). Whereas for the RX status
6214 * buffers the cpu only reads the last cacheline of the RX descriptor
6215 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6217 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6218 u32 opaque_key, u32 dest_idx_unmasked,
6219 unsigned int *frag_size)
6221 struct tg3_rx_buffer_desc *desc;
6222 struct ring_info *map;
6225 int skb_size, data_size, dest_idx;
/* Select descriptor, ring_info slot, and buffer size for the standard
 * or jumbo ring; unknown keys fail (-EINVAL return is on a missing
 * line in the default case).
 */
6227 switch (opaque_key) {
6228 case RXD_OPAQUE_RING_STD:
6229 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6230 desc = &tpr->rx_std[dest_idx];
6231 map = &tpr->rx_std_buffers[dest_idx];
6232 data_size = tp->rx_pkt_map_sz;
6235 case RXD_OPAQUE_RING_JUMBO:
6236 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6237 desc = &tpr->rx_jmb[dest_idx].std;
6238 map = &tpr->rx_jmb_buffers[dest_idx];
6239 data_size = TG3_RX_JMB_MAP_SZ;
6246 /* Do not overwrite any of the map or rp information
6247 * until we are sure we can commit to a new buffer.
6249 * Callers depend upon this behavior and assume that
6250 * we leave everything unchanged if we fail.
/* Small enough for a page fragment? Use the cheap frag allocator,
 * otherwise fall back to kmalloc (frag_size cleared on a missing
 * line in the else branch); both are GFP_ATOMIC-safe paths.
 */
6252 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6253 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6254 if (skb_size <= PAGE_SIZE) {
6255 data = netdev_alloc_frag(skb_size);
6256 *frag_size = skb_size;
6258 data = kmalloc(skb_size, GFP_ATOMIC);
/* Map past the headroom so the chip DMAs straight to the payload
 * area; on mapping failure free the buffer and leave the slot
 * untouched (per the contract above).
 */
6264 mapping = pci_map_single(tp->pdev,
6265 data + TG3_RX_OFFSET(tp),
6267 PCI_DMA_FROMDEVICE);
6268 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6269 tg3_frag_free(skb_size <= PAGE_SIZE, data);
/* Commit: record the buffer and its DMA address, then publish the
 * address to the hardware descriptor.
 */
6274 dma_unmap_addr_set(map, mapping, mapping);
6276 desc->addr_hi = ((u64)mapping >> 32);
6277 desc->addr_lo = ((u64)mapping & 0xffffffff);
6282 /* We only need to move over in the address because the other
6283 * members of the RX descriptor are invariant. See notes above
6284 * tg3_alloc_rx_data for full details.
6286 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6287 struct tg3_rx_prodring_set *dpr,
6288 u32 opaque_key, int src_idx,
6289 u32 dest_idx_unmasked)
6291 struct tg3 *tp = tnapi->tp;
6292 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6293 struct ring_info *src_map, *dest_map;
/* Source is always NAPI vector 0's producer ring; destination is the
 * caller's ring set (they differ under RSS).
 */
6294 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6297 switch (opaque_key) {
6298 case RXD_OPAQUE_RING_STD:
6299 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6300 dest_desc = &dpr->rx_std[dest_idx];
6301 dest_map = &dpr->rx_std_buffers[dest_idx];
6302 src_desc = &spr->rx_std[src_idx];
6303 src_map = &spr->rx_std_buffers[src_idx];
6306 case RXD_OPAQUE_RING_JUMBO:
6307 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6308 dest_desc = &dpr->rx_jmb[dest_idx].std;
6309 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6310 src_desc = &spr->rx_jmb[src_idx].std;
6311 src_map = &spr->rx_jmb_buffers[src_idx];
/* Move ownership of the buffer: copy data pointer, DMA address, and
 * the hardware-visible descriptor address words.
 */
6318 dest_map->data = src_map->data;
6319 dma_unmap_addr_set(dest_map, mapping,
6320 dma_unmap_addr(src_map, mapping));
6321 dest_desc->addr_hi = src_desc->addr_hi;
6322 dest_desc->addr_lo = src_desc->addr_lo;
6324 /* Ensure that the update to the skb happens after the physical
6325 * addresses have been transferred to the new BD location.
/* Clearing the source slot marks it empty for the refill logic. */
6329 src_map->data = NULL;
6332 /* The RX ring scheme is composed of multiple rings which post fresh
6333 * buffers to the chip, and one special ring the chip uses to report
6334 * status back to the host.
6336 * The special ring reports the status of received packets to the
6337 * host. The chip does not write into the original descriptor the
6338 * RX buffer was obtained from. The chip simply takes the original
6339 * descriptor as provided by the host, updates the status and length
6340 * field, then writes this into the next status ring entry.
6342 * Each ring the host uses to post buffers to the chip is described
6343 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6344 * it is first placed into the on-chip ram. When the packet's length
6345 * is known, it walks down the TG3_BDINFO entries to select the ring.
6346 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6347 * which is within the range of the new packet's length is chosen.
6349 * The "separate ring for rx status" scheme may sound queer, but it makes
6350 * sense from a cache coherency perspective. If only the host writes
6351 * to the buffer post rings, and only the chip writes to the rx status
6352 * rings, then cache lines never move beyond shared-modified state.
6353 * If both the host and chip were to write into the same ring, cache line
6354 * eviction could occur since both entities want it in an exclusive state.
/* NAPI RX poll: process up to 'budget' status-ring entries, returning
 * the number of packets received (return statement is outside the
 * visible lines). NOTE(review): this extraction is missing interior
 * lines throughout (rmb/wmb barriers, else branches, labels).
 */
6356 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6358 struct tg3 *tp = tnapi->tp;
6359 u32 work_mask, rx_std_posted = 0;
6360 u32 std_prod_idx, jmb_prod_idx;
6361 u32 sw_idx = tnapi->rx_rcb_ptr;
6364 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6366 hw_idx = *(tnapi->rx_rcb_prod_idx);
6368 * We need to order the read of hw_idx and the read of
6369 * the opaque cookie.
6374 std_prod_idx = tpr->rx_std_prod_idx;
6375 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6376 while (sw_idx != hw_idx && budget > 0) {
6377 struct ring_info *ri;
6378 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6380 struct sk_buff *skb;
6381 dma_addr_t dma_addr;
6382 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which producer ring (std/jumbo) and
 * which slot the buffer came from.
 */
6386 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6387 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6388 if (opaque_key == RXD_OPAQUE_RING_STD) {
6389 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6390 dma_addr = dma_unmap_addr(ri, mapping);
6392 post_ptr = &std_prod_idx;
6394 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6395 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6396 dma_addr = dma_unmap_addr(ri, mapping);
6398 post_ptr = &jmb_prod_idx;
6400 goto next_pkt_nopost;
6402 work_mask |= opaque_key;
/* Hardware-flagged receive error: recycle the buffer back to the
 * producer ring and count the drop (drop path partially on
 * missing lines).
 */
6404 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6405 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6407 tg3_recycle_rx(tnapi, tpr, opaque_key,
6408 desc_idx, *post_ptr);
6410 /* Other statistics kept track of by card. */
6415 prefetch(data + TG3_RX_OFFSET(tp));
6416 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* PTP event packet: capture the RX hardware timestamp registers. */
6419 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6420 RXD_FLAG_PTPSTAT_PTPV1 ||
6421 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6422 RXD_FLAG_PTPSTAT_PTPV2) {
6423 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6424 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
/* Large packet: hand the whole buffer to the stack via build_skb()
 * and post a freshly-allocated replacement buffer.
 */
6427 if (len > TG3_RX_COPY_THRESH(tp)) {
6429 unsigned int frag_size;
6431 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6432 *post_ptr, &frag_size);
6436 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6437 PCI_DMA_FROMDEVICE);
6439 skb = build_skb(data, frag_size);
6441 tg3_frag_free(frag_size != 0, data);
6442 goto drop_it_no_recycle;
6444 skb_reserve(skb, TG3_RX_OFFSET(tp));
6445 /* Ensure that the update to the data happens
6446 * after the usage of the old DMA mapping.
/* Small packet: recycle the DMA buffer and copy the payload into a
 * freshly allocated skb instead.
 */
6453 tg3_recycle_rx(tnapi, tpr, opaque_key,
6454 desc_idx, *post_ptr);
6456 skb = netdev_alloc_skb(tp->dev,
6457 len + TG3_RAW_IP_ALIGN);
6459 goto drop_it_no_recycle;
6461 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6462 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6464 data + TG3_RX_OFFSET(tp),
6466 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6471 tg3_hwclock_to_timestamp(tp, tstamp,
6472 skb_hwtstamps(skb));
/* Only claim CHECKSUM_UNNECESSARY when RXCSUM is enabled and the
 * chip computed a valid (0xffff) TCP/UDP checksum.
 */
6474 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6475 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6476 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6477 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6478 skb->ip_summed = CHECKSUM_UNNECESSARY;
6480 skb_checksum_none_assert(skb);
6482 skb->protocol = eth_type_trans(skb, tp->dev);
/* Oversized non-VLAN frame slipping past the MTU check: drop. */
6484 if (len > (tp->dev->mtu + ETH_HLEN) &&
6485 skb->protocol != htons(ETH_P_8021Q)) {
6487 goto drop_it_no_recycle;
6490 if (desc->type_flags & RXD_FLAG_VLAN &&
6491 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6492 __vlan_hwaccel_put_tag(skb,
6493 desc->err_vlan & RXD_VLAN_MASK);
6495 napi_gro_receive(&tnapi->napi, skb);
/* Periodically flush the standard producer index so the chip never
 * starves for buffers during a long poll.
 */
6503 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6504 tpr->rx_std_prod_idx = std_prod_idx &
6505 tp->rx_std_ring_mask;
6506 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6507 tpr->rx_std_prod_idx);
6508 work_mask &= ~RXD_OPAQUE_RING_STD;
6513 sw_idx &= tp->rx_ret_ring_mask;
6515 /* Refresh hw_idx to see if there is new work */
6516 if (sw_idx == hw_idx) {
6517 hw_idx = *(tnapi->rx_rcb_prod_idx);
6522 /* ACK the status ring. */
6523 tnapi->rx_rcb_ptr = sw_idx;
6524 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6526 /* Refill RX ring(s). */
6527 if (!tg3_flag(tp, ENABLE_RSS)) {
6528 /* Sync BD data before updating mailbox */
6531 if (work_mask & RXD_OPAQUE_RING_STD) {
6532 tpr->rx_std_prod_idx = std_prod_idx &
6533 tp->rx_std_ring_mask;
6534 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6535 tpr->rx_std_prod_idx);
6537 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6538 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6539 tp->rx_jmb_ring_mask;
6540 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6541 tpr->rx_jmb_prod_idx);
/* RSS: only NAPI vector 1 writes the hardware mailboxes; other
 * vectors record their indices and kick vector 1 to do the refill.
 */
6544 } else if (work_mask) {
6545 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6546 * updated before the producer indices can be updated.
6550 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6551 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6553 if (tnapi != &tp->napi[1]) {
6554 tp->rx_refill = true;
6555 napi_schedule(&tp->napi[1].napi);
/* Check the shared status block for a link-change event and, if one is
 * pending, clear it and re-run PHY setup under tp->lock. Skipped
 * entirely when link changes are detected via register polling instead.
 */
6562 static void tg3_poll_link(struct tg3 *tp)
6564 /* handle link change and other phy events */
6565 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6566 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6568 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Ack the link-change bit while preserving the rest. */
6569 sblk->status = SD_STATUS_UPDATED |
6570 (sblk->status & ~SD_STATUS_LINK_CHG);
6571 spin_lock(&tp->lock);
/* With phylib, just ack the MAC status bits; otherwise do a
 * full tg3_setup_phy (the else is on a missing line).
 */
6572 if (tg3_flag(tp, USE_PHYLIB)) {
6574 (MAC_STATUS_SYNC_CHANGED |
6575 MAC_STATUS_CFG_CHANGED |
6576 MAC_STATUS_MI_COMPLETION |
6577 MAC_STATUS_LNKSTATE_CHANGED));
6580 tg3_setup_phy(tp, 0);
6581 spin_unlock(&tp->lock);
/* RSS refill helper: transfer recycled buffers from a source producer
 * ring set 'spr' (another vector's) into the destination set 'dpr'
 * (vector 1's, which owns the hardware mailboxes). Standard ring first,
 * then jumbo. Copies are bounded by ring wrap and stop early at any
 * destination slot still holding a buffer.
 *
 * Returns an error accumulator (declaration and return are on lines
 * missing from this extraction).
 */
6586 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6587 struct tg3_rx_prodring_set *dpr,
6588 struct tg3_rx_prodring_set *spr)
6590 u32 si, di, cpycnt, src_prod_idx;
6594 src_prod_idx = spr->rx_std_prod_idx;
6596 /* Make sure updates to the rx_std_buffers[] entries and the
6597 * standard producer index are seen in the correct order.
6601 if (spr->rx_std_cons_idx == src_prod_idx)
/* Contiguous copy length: up to the producer, or to the end of the
 * ring when the region wraps.
 */
6604 if (spr->rx_std_cons_idx < src_prod_idx)
6605 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6607 cpycnt = tp->rx_std_ring_mask + 1 -
6608 spr->rx_std_cons_idx;
6610 cpycnt = min(cpycnt,
6611 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6613 si = spr->rx_std_cons_idx;
6614 di = dpr->rx_std_prod_idx;
/* Truncate at the first destination slot that is still occupied
 * (the cpycnt adjustment is on missing lines).
 */
6616 for (i = di; i < di + cpycnt; i++) {
6617 if (dpr->rx_std_buffers[i].data) {
6627 /* Ensure that updates to the rx_std_buffers ring and the
6628 * shadowed hardware producer ring from tg3_recycle_skb() are
6629 * ordered correctly WRT the skb check above.
6633 memcpy(&dpr->rx_std_buffers[di],
6634 &spr->rx_std_buffers[si],
6635 cpycnt * sizeof(struct ring_info));
/* Also copy the DMA addresses into the destination hardware BDs. */
6637 for (i = 0; i < cpycnt; i++, di++, si++) {
6638 struct tg3_rx_buffer_desc *sbd, *dbd;
6639 sbd = &spr->rx_std[si];
6640 dbd = &dpr->rx_std[di];
6641 dbd->addr_hi = sbd->addr_hi;
6642 dbd->addr_lo = sbd->addr_lo;
6645 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6646 tp->rx_std_ring_mask;
6647 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6648 tp->rx_std_ring_mask;
/* Same procedure for the jumbo ring. */
6652 src_prod_idx = spr->rx_jmb_prod_idx;
6654 /* Make sure updates to the rx_jmb_buffers[] entries and
6655 * the jumbo producer index are seen in the correct order.
6659 if (spr->rx_jmb_cons_idx == src_prod_idx)
6662 if (spr->rx_jmb_cons_idx < src_prod_idx)
6663 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6665 cpycnt = tp->rx_jmb_ring_mask + 1 -
6666 spr->rx_jmb_cons_idx;
6668 cpycnt = min(cpycnt,
6669 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6671 si = spr->rx_jmb_cons_idx;
6672 di = dpr->rx_jmb_prod_idx;
6674 for (i = di; i < di + cpycnt; i++) {
6675 if (dpr->rx_jmb_buffers[i].data) {
6685 /* Ensure that updates to the rx_jmb_buffers ring and the
6686 * shadowed hardware producer ring from tg3_recycle_skb() are
6687 * ordered correctly WRT the skb check above.
6691 memcpy(&dpr->rx_jmb_buffers[di],
6692 &spr->rx_jmb_buffers[si],
6693 cpycnt * sizeof(struct ring_info));
6695 for (i = 0; i < cpycnt; i++, di++, si++) {
6696 struct tg3_rx_buffer_desc *sbd, *dbd;
6697 sbd = &spr->rx_jmb[si].std;
6698 dbd = &dpr->rx_jmb[di].std;
6699 dbd->addr_hi = sbd->addr_hi;
6700 dbd->addr_lo = sbd->addr_lo;
6703 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6704 tp->rx_jmb_ring_mask;
6705 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6706 tp->rx_jmb_ring_mask;
/* Core NAPI work routine shared by tg3_poll() and tg3_poll_msix(): reaps TX
 * completions, processes RX within the NAPI budget, and — for the RSS case on
 * napi[1] — gathers refilled buffers from all RX vectors back into napi[0]'s
 * producer ring and kicks the hardware mailboxes.  Returns updated work_done.
 * NOTE(review): the extract elides the tg3_tx() call, error accumulation into
 * the HOSTCC kick path, and the return statement; visible code documented
 * as-is.
 */
6712 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6714 struct tg3 *tp = tnapi->tp;
6716 /* run TX completion thread */
6717 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6719 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vectors without an RX return ring have nothing more to do */
6723 if (!tnapi->rx_rcb_prod_idx)
6726 /* run RX thread, within the bounds set by NAPI.
6727 * All RX "locking" is done by ensuring outside
6728 * code synchronizes with tg3->napi.poll()
6730 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6731 work_done += tg3_rx(tnapi, budget - work_done);
/* RSS: only vector 1 performs the cross-ring buffer consolidation */
6733 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6734 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6736 u32 std_prod_idx = dpr->rx_std_prod_idx;
6737 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6739 tp->rx_refill = false;
6740 for (i = 1; i <= tp->rxq_cnt; i++)
6741 err |= tg3_rx_prodring_xfer(tp, dpr,
6742 &tp->napi[i].prodring);
/* Only ring the doorbells if the producer indices actually moved */
6746 if (std_prod_idx != dpr->rx_std_prod_idx)
6747 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6748 dpr->rx_std_prod_idx);
6750 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6751 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6752 dpr->rx_jmb_prod_idx);
6757 tw32_f(HOSTCC_MODE, tp->coal_now);
/* Queue the chip-reset work item exactly once: the atomic test_and_set of
 * TG3_FLAG_RESET_TASK_PENDING prevents double-scheduling.
 */
6763 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6765 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6766 schedule_work(&tp->reset_task);
/* Synchronously cancel any pending/running reset work, then clear the
 * pending and TX-recovery flags so future resets can be scheduled again.
 */
6769 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6771 cancel_work_sync(&tp->reset_task);
6772 tg3_flag_clear(tp, RESET_TASK_PENDING);
6773 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors (tagged status blocks, napi[1..n]).
 * Loops doing tg3_poll_work() until either the budget is hit or no RX/TX
 * work remains, then completes NAPI and re-enables the vector's interrupt
 * via its mailbox.  A race with RX refill on napi[1] is closed by forcing a
 * coalescing-now kick after napi_complete().
 * NOTE(review): the extract elides the enclosing while(1) loop, labels and
 * return statements; visible code documented as-is.
 */
6776 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6778 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6779 struct tg3 *tp = tnapi->tp;
6781 struct tg3_hw_status *sblk = tnapi->hw_status;
6784 work_done = tg3_poll_work(tnapi, work_done, budget);
6786 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6789 if (unlikely(work_done >= budget))
6792 /* tp->last_tag is used in tg3_int_reenable() below
6793 * to tell the hw how much work has been processed,
6794 * so we must read it before checking for more work.
6796 tnapi->last_tag = sblk->status_tag;
6797 tnapi->last_irq_tag = tnapi->last_tag;
6800 /* check for RX/TX work to do */
6801 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6802 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6804 /* This test here is not race free, but will reduce
6805 * the number of interrupts by looping again.
6807 if (tnapi == &tp->napi[1] && tp->rx_refill)
6810 napi_complete(napi);
6811 /* Reenable interrupts. */
6812 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6814 /* This test here is synchronized by napi_schedule()
6815 * and napi_complete() to close the race condition.
6817 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6818 tw32(HOSTCC_MODE, tp->coalesce_mode |
6819 HOSTCC_MODE_ENABLE |
/* TX-recovery path: finish NAPI and hand off to the reset task */
6830 /* work_done is guaranteed to be less than budget. */
6831 napi_complete(napi);
6832 tg3_reset_task_schedule(tp);
/* Diagnose a SD_STATUS_ERROR indication: check flow-attention, MSI status
 * and DMA status registers, log which one tripped, and schedule a chip
 * reset.  ERROR_PROCESSED guards against handling the same error twice.
 * NOTE(review): the elided lines presumably set real_error and early-return
 * when no real error is found — confirm against the full source.
 */
6836 static void tg3_process_error(struct tg3 *tp)
6839 bool real_error = false;
6841 if (tg3_flag(tp, ERROR_PROCESSED))
6844 /* Check Flow Attention register */
6845 val = tr32(HOSTCC_FLOW_ATTN);
6846 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6847 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6851 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6852 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6856 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6857 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6866 tg3_flag_set(tp, ERROR_PROCESSED);
6867 tg3_reset_task_schedule(tp);
/* NAPI poll handler for the default (vector 0 / INTx / MSI) interrupt.
 * Handles status-block errors, loops over tg3_poll_work() until the budget
 * is exhausted or tg3_has_work() reports nothing left, then completes NAPI
 * and re-enables interrupts.  Supports both tagged and non-tagged status.
 * NOTE(review): the extract elides the while(1) loop, labels and returns;
 * visible code documented as-is.
 */
6870 static int tg3_poll(struct napi_struct *napi, int budget)
6872 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6873 struct tg3 *tp = tnapi->tp;
6875 struct tg3_hw_status *sblk = tnapi->hw_status;
6878 if (sblk->status & SD_STATUS_ERROR)
6879 tg3_process_error(tp);
6883 work_done = tg3_poll_work(tnapi, work_done, budget);
6885 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6888 if (unlikely(work_done >= budget))
6891 if (tg3_flag(tp, TAGGED_STATUS)) {
6892 /* tp->last_tag is used in tg3_int_reenable() below
6893 * to tell the hw how much work has been processed,
6894 * so we must read it before checking for more work.
6896 tnapi->last_tag = sblk->status_tag;
6897 tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged mode: ack the status block by clearing UPDATED */
6900 sblk->status &= ~SD_STATUS_UPDATED;
6902 if (likely(!tg3_has_work(tnapi))) {
6903 napi_complete(napi);
6904 tg3_int_reenable(tnapi);
/* TX-recovery path: finish NAPI and hand off to the reset task */
6912 /* work_done is guaranteed to be less than budget. */
6913 napi_complete(napi);
6914 tg3_reset_task_schedule(tp);
/* Disable all NAPI contexts, highest vector first (reverse of enable order). */
6918 static void tg3_napi_disable(struct tg3 *tp)
6922 for (i = tp->irq_cnt - 1; i >= 0; i--)
6923 napi_disable(&tp->napi[i].napi);
/* Enable NAPI on every interrupt vector, vector 0 first. */
6926 static void tg3_napi_enable(struct tg3 *tp)
6930 for (i = 0; i < tp->irq_cnt; i++)
6931 napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses tg3_poll, the remaining MSI-X
 * vectors use tg3_poll_msix.  64 is the NAPI weight.
 */
6934 static void tg3_napi_init(struct tg3 *tp)
6938 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6939 for (i = 1; i < tp->irq_cnt; i++)
6940 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister every NAPI context added by tg3_napi_init(). */
6943 static void tg3_napi_fini(struct tg3 *tp)
6947 for (i = 0; i < tp->irq_cnt; i++)
6948 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the network interface: refresh trans_start so the watchdog does
 * not fire, stop NAPI, drop carrier, and disable all TX queues.
 */
6951 static inline void tg3_netif_stop(struct tg3 *tp)
6953 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6954 tg3_napi_disable(tp);
6955 netif_carrier_off(tp->dev);
6956 netif_tx_disable(tp->dev);
6959 /* tp->lock must be held */
/* Restart the interface after a stop: wake all TX queues, restore carrier,
 * re-enable NAPI, force a status-block update so the first poll runs, and
 * unmask chip interrupts.
 * NOTE(review): carrier-on appears gated on a condition elided from this
 * extract — confirm against the full source.
 */
6960 static inline void tg3_netif_start(struct tg3 *tp)
6964 /* NOTE: unconditional netif_tx_wake_all_queues is only
6965 * appropriate so long as all callers are assured to
6966 * have free tx slots (such as after tg3_init_hw)
6968 netif_tx_wake_all_queues(tp->dev);
6971 netif_carrier_on(tp->dev);
6973 tg3_napi_enable(tp);
6974 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6975 tg3_enable_ints(tp);
/* Mark IRQ handling as synchronized and wait for any in-flight handlers on
 * every vector to finish.  Must not already be quiesced (BUG_ON).
 * NOTE(review): the lines setting tp->irq_sync and disabling interrupts are
 * elided from this extract.
 */
6978 static void tg3_irq_quiesce(struct tg3 *tp)
6982 BUG_ON(tp->irq_sync);
6987 for (i = 0; i < tp->irq_cnt; i++)
6988 synchronize_irq(tp->napi[i].irq_vec);
6991 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6992 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6993 * with as well. Most of the time, this is not necessary except when
6994 * shutting down the device.
/* Takes tp->lock with BHs disabled; optionally quiesces IRQs first. */
6996 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6998 spin_lock_bh(&tp->lock);
7000 tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
7003 static inline void tg3_full_unlock(struct tg3 *tp)
7005 spin_unlock_bh(&tp->lock);
7008 /* One-shot MSI handler - Chip automatically disables interrupt
7009 * after sending MSI so driver doesn't have to do it.
/* Prefetch the status block and next RX descriptor, then schedule NAPI
 * unless an IRQ quiesce is in progress.
 */
7011 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7013 struct tg3_napi *tnapi = dev_id;
7014 struct tg3 *tp = tnapi->tp;
7016 prefetch(tnapi->hw_status);
7018 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7020 if (likely(!tg3_irq_sync(tp)))
7021 napi_schedule(&tnapi->napi);
7026 /* MSI ISR - No need to check for interrupt sharing and no need to
7027 * flush status block and interrupt mailbox. PCI ordering rules
7028 * guarantee that MSI will arrive after the status block.
/* Non-one-shot MSI: the driver must write the interrupt mailbox itself to
 * ack the IRQ and put the chip in "in-intr-handler" mode before scheduling
 * NAPI.  Always reports the interrupt as handled (MSI is never shared).
 */
7030 static irqreturn_t tg3_msi(int irq, void *dev_id)
7032 struct tg3_napi *tnapi = dev_id;
7033 struct tg3 *tp = tnapi->tp;
7035 prefetch(tnapi->hw_status);
7037 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7039 * Writing any value to intr-mbox-0 clears PCI INTA# and
7040 * chip-internal interrupt pending events.
7041 * Writing non-zero to intr-mbox-0 additional tells the
7042 * NIC to stop sending us irqs, engaging "in-intr-handler"
7045 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7046 if (likely(!tg3_irq_sync(tp)))
7047 napi_schedule(&tnapi->napi);
7049 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (non-tagged status).  Verifies the IRQ is
 * ours via SD_STATUS_UPDATED (falling back to the PCI state register while
 * the chip is resetting), acks via a flushed mailbox write, and either
 * schedules NAPI or — when there is no work — immediately re-enables
 * interrupts for a possibly shared line.
 * NOTE(review): the extract elides the handled=0 path and goto labels.
 */
7052 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7054 struct tg3_napi *tnapi = dev_id;
7055 struct tg3 *tp = tnapi->tp;
7056 struct tg3_hw_status *sblk = tnapi->hw_status;
7057 unsigned int handled = 1;
7059 /* In INTx mode, it is possible for the interrupt to arrive at
7060 * the CPU before the status block posted prior to the interrupt.
7061 * Reading the PCI State register will confirm whether the
7062 * interrupt is ours and will flush the status block.
7064 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7065 if (tg3_flag(tp, CHIP_RESETTING) ||
7066 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7073 * Writing any value to intr-mbox-0 clears PCI INTA# and
7074 * chip-internal interrupt pending events.
7075 * Writing non-zero to intr-mbox-0 additional tells the
7076 * NIC to stop sending us irqs, engaging "in-intr-handler"
7079 * Flush the mailbox to de-assert the IRQ immediately to prevent
7080 * spurious interrupts. The flush impacts performance but
7081 * excessive spurious interrupts can be worse in some cases.
7083 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7084 if (tg3_irq_sync(tp))
7086 sblk->status &= ~SD_STATUS_UPDATED;
7087 if (likely(tg3_has_work(tnapi))) {
7088 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7089 napi_schedule(&tnapi->napi);
7091 /* No work, shared interrupt perhaps? re-enable
7092 * interrupts, and flush that PCI write
7094 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7098 return IRQ_RETVAL(handled);
/* Legacy INTx handler for tagged-status chips: an interrupt is ours only if
 * the status tag advanced past last_irq_tag.  Acks via a flushed mailbox
 * write, records the tag so screaming shared interrupts are reported as
 * unhandled, and schedules NAPI.
 * NOTE(review): the extract elides the handled=0 path and goto labels.
 */
7101 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7103 struct tg3_napi *tnapi = dev_id;
7104 struct tg3 *tp = tnapi->tp;
7105 struct tg3_hw_status *sblk = tnapi->hw_status;
7106 unsigned int handled = 1;
7108 /* In INTx mode, it is possible for the interrupt to arrive at
7109 * the CPU before the status block posted prior to the interrupt.
7110 * Reading the PCI State register will confirm whether the
7111 * interrupt is ours and will flush the status block.
7113 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7114 if (tg3_flag(tp, CHIP_RESETTING) ||
7115 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7122 * writing any value to intr-mbox-0 clears PCI INTA# and
7123 * chip-internal interrupt pending events.
7124 * writing non-zero to intr-mbox-0 additional tells the
7125 * NIC to stop sending us irqs, engaging "in-intr-handler"
7128 * Flush the mailbox to de-assert the IRQ immediately to prevent
7129 * spurious interrupts. The flush impacts performance but
7130 * excessive spurious interrupts can be worse in some cases.
7132 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7135 * In a shared interrupt configuration, sometimes other devices'
7136 * interrupts will scream. We record the current status tag here
7137 * so that the above check can report that the screaming interrupts
7138 * are unhandled. Eventually they will be silenced.
7140 tnapi->last_irq_tag = sblk->status_tag;
7142 if (tg3_irq_sync(tp))
7145 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7147 napi_schedule(&tnapi->napi);
7150 return IRQ_RETVAL(handled);
7153 /* ISR for interrupt test */
/* Minimal ISR used by the ethtool interrupt self-test: if the status block
 * was updated or INTA is asserted, disable interrupts and report handled.
 */
7154 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7156 struct tg3_napi *tnapi = dev_id;
7157 struct tg3 *tp = tnapi->tp;
7158 struct tg3_hw_status *sblk = tnapi->hw_status;
7160 if ((sblk->status & SD_STATUS_UPDATED) ||
7161 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7162 tg3_disable_ints(tp);
7163 return IRQ_RETVAL(1);
7165 return IRQ_RETVAL(0);
7168 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler for every vector by hand,
 * unless IRQs are currently quiesced.
 */
7169 static void tg3_poll_controller(struct net_device *dev)
7172 struct tg3 *tp = netdev_priv(dev);
7174 if (tg3_irq_sync(tp))
7177 for (i = 0; i < tp->irq_cnt; i++)
7178 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* ndo_tx_timeout handler: log (when tx_err messages are enabled) and
 * schedule a full chip reset via the reset work item.
 */
7182 static void tg3_tx_timeout(struct net_device *dev)
7184 struct tg3 *tp = netdev_priv(dev);
7186 if (netif_msg_tx_err(tp)) {
7187 netdev_err(dev, "transmit timed out, resetting\n");
7191 tg3_reset_task_schedule(tp);
7194 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns nonzero when [mapping, mapping+len+8) would wrap a 32-bit
 * address, i.e. the buffer straddles a 4GB boundary the hardware cannot
 * DMA across.  The 0xffffdcc0 pre-check is a cheap filter before the
 * wrap test.
 */
7195 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7197 u32 base = (u32) mapping & 0xffffffff;
7199 return (base > 0xffffdcc0) && (base + len + 8 < base);
7202 /* Test for DMA addresses > 40-bit */
/* On 64-bit highmem configs, chips with the 40-bit DMA bug cannot address
 * past 2^40; report when the end of the buffer exceeds that limit.
 * Elsewhere this is compiled out (the elided #else returns 0).
 */
7203 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7206 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7207 if (tg3_flag(tp, 40BIT_DMA_BUG))
7208 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX buffer descriptor: split the 64-bit DMA address into hi/lo,
 * pack length with flags, and combine mss and vlan into the vlan_tag word.
 */
7215 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7216 dma_addr_t mapping, u32 len, u32 flags,
7219 txbd->addr_hi = ((u64) mapping >> 32);
7220 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7221 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7222 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Place one TX fragment onto the ring, splitting it into dma_limit-sized
 * sub-descriptors when the chip requires it, and advancing *entry/*budget.
 * Returns true ("hooked a hwbug") for fragments that trip the short-DMA,
 * 4GB-boundary or 40-bit-address workarounds, so the caller can fall back
 * to tigon3_dma_hwbug_workaround().
 * NOTE(review): several lines (the budget-exhausted unwind and return
 * statements) are elided from this extract.
 */
7225 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7226 dma_addr_t map, u32 len, u32 flags,
7229 struct tg3 *tp = tnapi->tp;
/* Chips with the short-DMA bug cannot DMA <= 8 byte fragments */
7232 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7235 if (tg3_4g_overflow_test(map, len))
7238 if (tg3_40bit_overflow_test(tp, map, len))
7241 if (tp->dma_limit) {
7242 u32 prvidx = *entry;
7243 u32 tmp_flag = flags & ~TXD_FLAG_END;
7244 while (len > tp->dma_limit && *budget) {
7245 u32 frag_len = tp->dma_limit;
7246 len -= tp->dma_limit;
7248 /* Avoid the 8byte DMA problem */
7250 len += tp->dma_limit / 2;
7251 frag_len = tp->dma_limit / 2;
7254 tnapi->tx_buffers[*entry].fragmented = true;
7256 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7257 frag_len, tmp_flag, mss, vlan);
7260 *entry = NEXT_TX(*entry);
7267 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7268 len, flags, mss, vlan);
7270 *entry = NEXT_TX(*entry);
/* Last sub-descriptor placed: the previous slot is no longer "fragmented" */
7273 tnapi->tx_buffers[prvidx].fragmented = false;
7277 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7278 len, flags, mss, vlan);
7279 *entry = NEXT_TX(*entry);
/* Unmap the DMA mappings of a transmitted skb starting at ring slot
 * 'entry': the head mapping first, then 'last'+1 page fragments, skipping
 * over any extra sub-descriptors created by the dma_limit splitting in
 * tg3_tx_frag_set() (marked via txb->fragmented).
 * NOTE(review): the skb fetch from txb and the skb-length argument to
 * pci_unmap_single are elided from this extract.
 */
7285 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7288 struct sk_buff *skb;
7289 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7294 pci_unmap_single(tnapi->tp->pdev,
7295 dma_unmap_addr(txb, mapping),
7299 while (txb->fragmented) {
7300 txb->fragmented = false;
7301 entry = NEXT_TX(entry);
7302 txb = &tnapi->tx_buffers[entry];
7305 for (i = 0; i <= last; i++) {
7306 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7308 entry = NEXT_TX(entry);
7309 txb = &tnapi->tx_buffers[entry];
7311 pci_unmap_page(tnapi->tp->pdev,
7312 dma_unmap_addr(txb, mapping),
7313 skb_frag_size(frag), PCI_DMA_TODEVICE);
7315 while (txb->fragmented) {
7316 txb->fragmented = false;
7317 entry = NEXT_TX(entry);
7318 txb = &tnapi->tx_buffers[entry];
7323 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Re-linearize the skb into a freshly allocated copy (with 4-byte alignment
 * headroom on 5701), map the new buffer, and re-emit it as a single END
 * descriptor via tg3_tx_frag_set().  On any failure the new skb and its
 * descriptors are unwound.  *pskb is replaced by the new skb.
 * NOTE(review): the error-path bookkeeping (ret assignment, freeing the
 * original skb, return) is elided from this extract.
 */
7324 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7325 struct sk_buff **pskb,
7326 u32 *entry, u32 *budget,
7327 u32 base_flags, u32 mss, u32 vlan)
7329 struct tg3 *tp = tnapi->tp;
7330 struct sk_buff *new_skb, *skb = *pskb;
7331 dma_addr_t new_addr = 0;
7334 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7335 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 requires the payload start to be 4-byte aligned */
7337 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7339 new_skb = skb_copy_expand(skb,
7340 skb_headroom(skb) + more_headroom,
7341 skb_tailroom(skb), GFP_ATOMIC);
7347 /* New SKB is guaranteed to be linear. */
7348 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7350 /* Make sure the mapping succeeded */
7351 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7352 dev_kfree_skb(new_skb);
7355 u32 save_entry = *entry;
7357 base_flags |= TXD_FLAG_END;
7359 tnapi->tx_buffers[*entry].skb = new_skb;
7360 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7363 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7364 new_skb->len, base_flags,
7366 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7367 dev_kfree_skb(new_skb);
7378 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7380 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7381 * TSO header is greater than 80 bytes.
/* Software-segment the skb with GSO (TSO disabled in the feature mask) and
 * transmit each resulting segment through tg3_start_xmit().  If the ring
 * might not hold the worst-case 3 descriptors per segment, stop the queue
 * and return NETDEV_TX_BUSY so the stack requeues the skb.
 * NOTE(review): the segment-walk loop header and the freeing of the
 * original skb are elided from this extract.
 */
7383 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7385 struct sk_buff *segs, *nskb;
7386 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7388 /* Estimate the number of fragments in the worst case */
7389 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7390 netif_stop_queue(tp->dev);
7392 /* netif_tx_stop_queue() must be done before checking
7393 * checking tx index in tg3_tx_avail() below, because in
7394 * tg3_tx(), we update tx index before checking for
7395 * netif_tx_queue_stopped().
7398 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7399 return NETDEV_TX_BUSY;
7401 netif_wake_queue(tp->dev);
7404 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7406 goto tg3_tso_bug_end;
7412 tg3_start_xmit(nskb, tp->dev);
7418 return NETDEV_TX_OK;
7421 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7422 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* ndo_start_xmit: build TX descriptors for an skb.  Flow:
 *  1. pick the per-queue tnapi/txq and check ring space;
 *  2. compute base_flags/mss for checksum offload, TSO (with the >80-byte
 *     header software fallback), VLAN and HW timestamping;
 *  3. DMA-map the head and each page fragment, emitting descriptors via
 *     tg3_tx_frag_set(); any hardware-DMA-bug hit falls back to the
 *     linearizing workaround;
 *  4. kick the producer mailbox and stop the queue when nearly full.
 * Runs in BH-disabled context under netif_tx_lock (see inline comment).
 * NOTE(review): this extract elides a number of lines (error labels such as
 * drop/drop_nofree/dma_error, the iph assignment, wmb(), and some
 * curly braces); comments describe only the visible code.
 */
7424 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7426 struct tg3 *tp = netdev_priv(dev);
7427 u32 len, entry, base_flags, mss, vlan = 0;
7429 int i = -1, would_hit_hwbug;
7431 struct tg3_napi *tnapi;
7432 struct netdev_queue *txq;
7435 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7436 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
/* With TSS, vector 0 handles no TX; queues map to napi[1..n] (elided) */
7437 if (tg3_flag(tp, ENABLE_TSS))
7440 budget = tg3_tx_avail(tnapi);
7442 /* We are running in BH disabled context with netif_tx_lock
7443 * and TX reclaim runs via tp->napi.poll inside of a software
7444 * interrupt. Furthermore, IRQ processing runs lockless so we have
7445 * no IRQ context deadlocks to worry about either. Rejoice!
7447 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7448 if (!netif_tx_queue_stopped(txq)) {
7449 netif_tx_stop_queue(txq);
7451 /* This is a hard error, log it. */
7453 "BUG! Tx Ring full when queue awake!\n");
7455 return NETDEV_TX_BUSY;
7458 entry = tnapi->tx_prod;
7460 if (skb->ip_summed == CHECKSUM_PARTIAL)
7461 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7463 mss = skb_shinfo(skb)->gso_size;
/* TSO path: fix up IP/TCP headers and encode mss/header length */
7466 u32 tcp_opt_len, hdr_len;
7468 if (skb_header_cloned(skb) &&
7469 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7473 tcp_opt_len = tcp_optlen(skb);
7475 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7477 if (!skb_is_gso_v6(skb)) {
7479 iph->tot_len = htons(mss + hdr_len);
/* Headers longer than 80 bytes trip the TSO hardware bug */
7482 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7483 tg3_flag(tp, TSO_BUG))
7484 return tg3_tso_bug(tp, skb);
7486 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7487 TXD_FLAG_CPU_POST_DMA);
7489 if (tg3_flag(tp, HW_TSO_1) ||
7490 tg3_flag(tp, HW_TSO_2) ||
7491 tg3_flag(tp, HW_TSO_3)) {
7492 tcp_hdr(skb)->check = 0;
7493 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7495 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Each HW TSO generation encodes the header length differently */
7500 if (tg3_flag(tp, HW_TSO_3)) {
7501 mss |= (hdr_len & 0xc) << 12;
7503 base_flags |= 0x00000010;
7504 base_flags |= (hdr_len & 0x3e0) << 5;
7505 } else if (tg3_flag(tp, HW_TSO_2))
7506 mss |= hdr_len << 9;
7507 else if (tg3_flag(tp, HW_TSO_1) ||
7508 tg3_asic_rev(tp) == ASIC_REV_5705) {
7509 if (tcp_opt_len || iph->ihl > 5) {
7512 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7513 mss |= (tsflags << 11);
7516 if (tcp_opt_len || iph->ihl > 5) {
7519 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7520 base_flags |= tsflags << 12;
7525 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7526 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7527 base_flags |= TXD_FLAG_JMB_PKT;
7529 if (vlan_tx_tag_present(skb)) {
7530 base_flags |= TXD_FLAG_VLAN;
7531 vlan = vlan_tx_tag_get(skb);
7534 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7535 tg3_flag(tp, TX_TSTAMP_EN)) {
7536 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7537 base_flags |= TXD_FLAG_HWTSTAMP;
7540 len = skb_headlen(skb);
7542 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7543 if (pci_dma_mapping_error(tp->pdev, mapping))
7547 tnapi->tx_buffers[entry].skb = skb;
7548 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7550 would_hit_hwbug = 0;
7552 if (tg3_flag(tp, 5701_DMA_BUG))
7553 would_hit_hwbug = 1;
7555 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7556 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7558 would_hit_hwbug = 1;
7559 } else if (skb_shinfo(skb)->nr_frags > 0) {
/* Firmware TSO only passes mss on the first descriptor (elided: tmp_mss=0) */
7562 if (!tg3_flag(tp, HW_TSO_1) &&
7563 !tg3_flag(tp, HW_TSO_2) &&
7564 !tg3_flag(tp, HW_TSO_3))
7567 /* Now loop through additional data
7568 * fragments, and queue them.
7570 last = skb_shinfo(skb)->nr_frags - 1;
7571 for (i = 0; i <= last; i++) {
7572 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7574 len = skb_frag_size(frag);
7575 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7576 len, DMA_TO_DEVICE);
7578 tnapi->tx_buffers[entry].skb = NULL;
7579 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7581 if (dma_mapping_error(&tp->pdev->dev, mapping))
7585 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7587 ((i == last) ? TXD_FLAG_END : 0),
7589 would_hit_hwbug = 1;
7595 if (would_hit_hwbug) {
/* Undo all descriptors written so far, then retry via linearized copy */
7596 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7598 /* If the workaround fails due to memory/mapping
7599 * failure, silently drop this packet.
7601 entry = tnapi->tx_prod;
7602 budget = tg3_tx_avail(tnapi);
7603 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7604 base_flags, mss, vlan))
7608 skb_tx_timestamp(skb);
7609 netdev_tx_sent_queue(txq, skb->len);
7611 /* Sync BD data before updating mailbox */
7614 /* Packets are ready, update Tx producer idx local and on card. */
7615 tw32_tx_mbox(tnapi->prodmbox, entry);
7617 tnapi->tx_prod = entry;
7618 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7619 netif_tx_stop_queue(txq);
7621 /* netif_tx_stop_queue() must be done before checking
7622 * checking tx index in tg3_tx_avail() below, because in
7623 * tg3_tx(), we update tx index before checking for
7624 * netif_tx_queue_stopped().
7627 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7628 netif_tx_wake_queue(txq);
7632 return NETDEV_TX_OK;
/* DMA-error path: unmap what was mapped and drop the packet */
7635 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7636 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7641 return NETDEV_TX_OK;
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode:
 * on enable, force the port mode (MII for 10/100-only PHYs, else GMII)
 * and set link polarity on pre-5705 chips; on disable, restore polarity
 * handling.  Writes the result to the MAC_MODE register.
 * NOTE(review): the if/else structure around the enable flag and a udelay
 * after the final write are elided from this extract.
 */
7644 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7647 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7648 MAC_MODE_PORT_MODE_MASK);
7650 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7652 if (!tg3_flag(tp, 5705_PLUS))
7653 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7655 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7656 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7658 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7660 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7662 if (tg3_flag(tp, 5705_PLUS) ||
7663 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7664 tg3_asic_rev(tp) == ASIC_REV_5700)
7665 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7668 tw32(MAC_MODE, tp->mac_mode);
/* Put the PHY into loopback at the requested speed (for ethtool loopback
 * self-tests).  Disables APD/auto-MDIX, optionally enables external
 * loopback, programs BMCR (+master mode or FET trim registers for gigabit),
 * applies FET-specific forced-link test bits on 5785, resets the RX MAC on
 * 5780-class serdes parts, and finally programs MAC_MODE to match the
 * loopback speed.
 * NOTE(review): switch/case structure over 'speed', some delays and the
 * return statement are elided from this extract; comments follow only the
 * visible code.
 */
7672 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7674 u32 val, bmcr, mac_mode, ptest = 0;
7676 tg3_phy_toggle_apd(tp, false);
7677 tg3_phy_toggle_automdix(tp, 0);
7679 if (extlpbk && tg3_phy_set_extloopbk(tp))
7682 bmcr = BMCR_FULLDPLX;
7687 bmcr |= BMCR_SPEED100;
/* Gigabit on FET PHYs: loop back at 100 with trim bits instead */
7691 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7693 bmcr |= BMCR_SPEED100;
7696 bmcr |= BMCR_SPEED1000;
7701 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7702 tg3_readphy(tp, MII_CTRL1000, &val);
7703 val |= CTL1000_AS_MASTER |
7704 CTL1000_ENABLE_MASTER;
7705 tg3_writephy(tp, MII_CTRL1000, val);
7707 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7708 MII_TG3_FET_PTEST_TRIM_2;
7709 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7712 bmcr |= BMCR_LOOPBACK;
7714 tg3_writephy(tp, MII_BMCR, bmcr);
7716 /* The write needs to be flushed for the FETs */
7717 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7718 tg3_readphy(tp, MII_BMCR, &bmcr);
7722 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7723 tg3_asic_rev(tp) == ASIC_REV_5785) {
7724 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7725 MII_TG3_FET_PTEST_FRC_TX_LINK |
7726 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7728 /* The write needs to be flushed for the AC131 */
7729 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7732 /* Reset to prevent losing 1st rx packet intermittently */
7733 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7734 tg3_flag(tp, 5780_CLASS)) {
7735 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7737 tw32_f(MAC_RX_MODE, tp->rx_mode);
7740 mac_mode = tp->mac_mode &
7741 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7742 if (speed == SPEED_1000)
7743 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7745 mac_mode |= MAC_MODE_PORT_MODE_MII;
7747 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7748 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
/* 5401/5411 PHYs need opposite link-polarity settings on 5700 */
7750 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7751 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7752 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7753 mac_mode |= MAC_MODE_LINK_POLARITY;
7755 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7756 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7759 tw32(MAC_MODE, mac_mode);
/* Handle NETIF_F_LOOPBACK feature toggles: enable or disable internal MAC
 * loopback under tp->lock, adjusting carrier state accordingly.  No-ops if
 * the requested state already matches MAC_MODE_PORT_INT_LPBACK.
 */
7765 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7767 struct tg3 *tp = netdev_priv(dev);
7769 if (features & NETIF_F_LOOPBACK) {
7770 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7773 spin_lock_bh(&tp->lock);
7774 tg3_mac_loopback(tp, true);
7775 netif_carrier_on(tp->dev);
7776 spin_unlock_bh(&tp->lock);
7777 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7779 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7782 spin_lock_bh(&tp->lock);
7783 tg3_mac_loopback(tp, false);
7784 /* Force link status check */
7785 tg3_setup_phy(tp, 1);
7786 spin_unlock_bh(&tp->lock);
7787 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU, so
 * strip all TSO feature bits in that configuration.
 */
7791 static netdev_features_t tg3_fix_features(struct net_device *dev,
7792 netdev_features_t features)
7794 struct tg3 *tp = netdev_priv(dev);
7796 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7797 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features: react to a NETIF_F_LOOPBACK change while the device is
 * running by toggling internal MAC loopback.
 */
7802 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7804 netdev_features_t changed = dev->features ^ features;
7806 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7807 tg3_set_loopback(dev, features);
/* Free all rx data buffers in a producer ring set.  For per-vector rings
 * (tpr != napi[0]'s) only the occupied cons..prod window is walked; for the
 * primary ring every slot is freed, including the jumbo ring on
 * jumbo-capable non-5780 chips.
 * NOTE(review): the map-size arguments to tg3_rx_data_free and a return are
 * elided from this extract.
 */
7812 static void tg3_rx_prodring_free(struct tg3 *tp,
7813 struct tg3_rx_prodring_set *tpr)
7817 if (tpr != &tp->napi[0].prodring) {
7818 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7819 i = (i + 1) & tp->rx_std_ring_mask)
7820 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7823 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7824 for (i = tpr->rx_jmb_cons_idx;
7825 i != tpr->rx_jmb_prod_idx;
7826 i = (i + 1) & tp->rx_jmb_ring_mask) {
7827 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7835 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7836 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7839 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7840 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7841 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7846 /* Initialize rx rings for packet processing.
7848 * The chip has been shut down and the driver detached from
7849 * the networking, so no interrupts or new tx packets will
7850 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset indices, zero the descriptor rings, program the invariant
 * descriptor fields (DMA size, END flag, opaque ring/index), then allocate
 * fresh data buffers for rx_pending std slots and rx_jumbo_pending jumbo
 * slots.  Partial allocation shrinks the ring with a warning; a too-small
 * result (elided threshold check) frees everything and fails.
 * NOTE(review): several lines (frag_size argument plumbing, error labels,
 * returns) are elided from this extract.
 */
7853 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7854 struct tg3_rx_prodring_set *tpr)
7856 u32 i, rx_pkt_dma_sz;
7858 tpr->rx_std_cons_idx = 0;
7859 tpr->rx_std_prod_idx = 0;
7860 tpr->rx_jmb_cons_idx = 0;
7861 tpr->rx_jmb_prod_idx = 0;
/* Secondary (per-vector) sets only shadow bookkeeping; no descriptors */
7863 if (tpr != &tp->napi[0].prodring) {
7864 memset(&tpr->rx_std_buffers[0], 0,
7865 TG3_RX_STD_BUFF_RING_SIZE(tp));
7866 if (tpr->rx_jmb_buffers)
7867 memset(&tpr->rx_jmb_buffers[0], 0,
7868 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7872 /* Zero out all descriptors. */
7873 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7875 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7876 if (tg3_flag(tp, 5780_CLASS) &&
7877 tp->dev->mtu > ETH_DATA_LEN)
7878 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7879 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7881 /* Initialize invariants of the rings, we only set this
7882 * stuff once. This works because the card does not
7883 * write into the rx buffer posting rings.
7885 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7886 struct tg3_rx_buffer_desc *rxd;
7888 rxd = &tpr->rx_std[i];
7889 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7890 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7891 rxd->opaque = (RXD_OPAQUE_RING_STD |
7892 (i << RXD_OPAQUE_INDEX_SHIFT));
7895 /* Now allocate fresh SKBs for each rx ring. */
7896 for (i = 0; i < tp->rx_pending; i++) {
7897 unsigned int frag_size;
7899 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7901 netdev_warn(tp->dev,
7902 "Using a smaller RX standard ring. Only "
7903 "%d out of %d buffers were allocated "
7904 "successfully\n", i, tp->rx_pending);
7912 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7915 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7917 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7920 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7921 struct tg3_rx_buffer_desc *rxd;
7923 rxd = &tpr->rx_jmb[i].std;
7924 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7925 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7927 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7928 (i << RXD_OPAQUE_INDEX_SHIFT));
7931 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7932 unsigned int frag_size;
7934 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7936 netdev_warn(tp->dev,
7937 "Using a smaller RX jumbo ring. Only %d "
7938 "out of %d buffers were allocated "
7939 "successfully\n", i, tp->rx_jumbo_pending);
7942 tp->rx_jumbo_pending = i;
/* Failure path: release whatever was allocated */
7951 tg3_rx_prodring_free(tp, tpr);
/* Release a producer ring set: free the bookkeeping arrays (NULLing the
 * pointers against double-free) and the coherent DMA descriptor rings.
 * NOTE(review): the NULL guards around the dma_free_coherent calls are
 * elided from this extract.
 */
7955 static void tg3_rx_prodring_fini(struct tg3 *tp,
7956 struct tg3_rx_prodring_set *tpr)
7958 kfree(tpr->rx_std_buffers);
7959 tpr->rx_std_buffers = NULL;
7960 kfree(tpr->rx_jmb_buffers);
7961 tpr->rx_jmb_buffers = NULL;
7963 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7964 tpr->rx_std, tpr->rx_std_mapping);
7968 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7969 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer ring set: kzalloc'd bookkeeping arrays plus coherent
 * DMA descriptor rings; the jumbo pair only on jumbo-capable non-5780
 * chips.  Any failure tears down via tg3_rx_prodring_fini().
 * NOTE(review): allocation-failure checks on the DMA rings and the returns
 * are elided from this extract.
 */
7974 static int tg3_rx_prodring_init(struct tg3 *tp,
7975 struct tg3_rx_prodring_set *tpr)
7977 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7979 if (!tpr->rx_std_buffers)
7982 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7983 TG3_RX_STD_RING_BYTES(tp),
7984 &tpr->rx_std_mapping,
7989 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7990 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7992 if (!tpr->rx_jmb_buffers)
7995 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7996 TG3_RX_JMB_RING_BYTES(tp),
7997 &tpr->rx_jmb_mapping,
8006 tg3_rx_prodring_fini(tp, tpr);
8010 /* Free up pending packets in all rx/tx rings.
8012 * The chip has been shut down and the driver detached from
8013 * the networking, so no interrupts or new tx packets will
8014 * end up in the driver. tp->{tx,}lock is not held and we are not
8015 * in an interrupt context and thus may sleep.
/* For each vector: drop its RX producer buffers, then unmap and free every
 * pending TX skb and reset the BQL queue state.
 */
8017 static void tg3_free_rings(struct tg3 *tp)
8021 for (j = 0; j < tp->irq_cnt; j++) {
8022 struct tg3_napi *tnapi = &tp->napi[j];
8024 tg3_rx_prodring_free(tp, &tnapi->prodring);
8026 if (!tnapi->tx_buffers)
8029 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8030 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8035 tg3_tx_skb_unmap(tnapi, i,
8036 skb_shinfo(skb)->nr_frags - 1);
8038 dev_kfree_skb_any(skb);
8040 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8044 /* Initialize tx/rx rings for packet processing.
8046 * The chip has been shut down and the driver detached from
8047 * the networking, so no interrupts or new tx packets will
8048 * end up in the driver. tp->{tx,}lock are held and thus
8051 static int tg3_init_rings(struct tg3 *tp)
8055 /* Free up all the SKBs. */
/* Reset per-vector software state, clear the shared status block,
 * zero the TX/RX-return rings, and repopulate the RX producer rings.
 * Returns 0 on success; a prodring allocation failure aborts
 * (failure-path lines elided in this excerpt).
 */
8058 for (i = 0; i < tp->irq_cnt; i++) {
8059 struct tg3_napi *tnapi = &tp->napi[i];
8061 tnapi->last_tag = 0;
8062 tnapi->last_irq_tag = 0;
8063 tnapi->hw_status->status = 0;
8064 tnapi->hw_status->status_tag = 0;
8065 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8070 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8072 tnapi->rx_rcb_ptr = 0;
8074 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8076 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
/* Release TX resources for every possible vector: the DMA-coherent
 * TX descriptor ring and the host-side tx_buffers shadow array.
 * Safe to call on partially-initialized state (NULL checks / NULL-ing
 * make it idempotent).
 */
8085 static void tg3_mem_tx_release(struct tg3 *tp)
/* irq_max (not irq_cnt): also covers vectors that were provisioned
 * but are not currently active.
 */
8089 for (i = 0; i < tp->irq_max; i++) {
8090 struct tg3_napi *tnapi = &tp->napi[i];
8092 if (tnapi->tx_ring) {
8093 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8094 tnapi->tx_ring, tnapi->tx_desc_mapping);
8095 tnapi->tx_ring = NULL;
8098 kfree(tnapi->tx_buffers);
8099 tnapi->tx_buffers = NULL;
/* Allocate TX resources (shadow buffer array + DMA-coherent
 * descriptor ring) for each TX queue. Returns 0 on success;
 * on failure unwinds via tg3_mem_tx_release().
 * NOTE(review): kzalloc(sizeof(...) * TG3_TX_RING_SIZE) would be
 * kcalloc() in modern kernel style — overflow-checked multiply.
 */
8103 static int tg3_mem_tx_acquire(struct tg3 *tp)
8106 struct tg3_napi *tnapi = &tp->napi[0];
8108 /* If multivector TSS is enabled, vector 0 does not handle
8109 * tx interrupts. Don't allocate any resources for it.
/* When TSS is on, start from vector 1 (increment elided in excerpt). */
8111 if (tg3_flag(tp, ENABLE_TSS))
8114 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8115 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8116 TG3_TX_RING_SIZE, GFP_KERNEL);
8117 if (!tnapi->tx_buffers)
8120 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8122 &tnapi->tx_desc_mapping,
8124 if (!tnapi->tx_ring)
/* Error exit: release everything allocated so far. */
8131 tg3_mem_tx_release(tp);
/* Release RX resources for every possible vector: the producer ring
 * set and the DMA-coherent RX return (completion) ring.
 * (rx_rcb NULL guard elided in this excerpt.)
 */
8135 static void tg3_mem_rx_release(struct tg3 *tp)
8139 for (i = 0; i < tp->irq_max; i++) {
8140 struct tg3_napi *tnapi = &tp->napi[i];
8142 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8147 dma_free_coherent(&tp->pdev->dev,
8148 TG3_RX_RCB_RING_BYTES(tp),
8150 tnapi->rx_rcb_mapping);
8151 tnapi->rx_rcb = NULL;
/* Allocate RX resources per RX queue: a producer ring set for each
 * vector, plus a DMA-coherent RX return ring for every vector that
 * actually handles RX completions. Returns 0 on success; unwinds via
 * tg3_mem_rx_release() on failure (error labels elided in excerpt).
 */
8155 static int tg3_mem_rx_acquire(struct tg3 *tp)
8157 unsigned int i, limit;
8159 limit = tp->rxq_cnt;
8161 /* If RSS is enabled, we need a (dummy) producer ring
8162 * set on vector zero. This is the true hw prodring.
/* Presumably "limit++" here — elided in this excerpt. */
8164 if (tg3_flag(tp, ENABLE_RSS))
8167 for (i = 0; i < limit; i++) {
8168 struct tg3_napi *tnapi = &tp->napi[i];
8170 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8173 /* If multivector RSS is enabled, vector 0
8174 * does not handle rx or tx interrupts.
8175 * Don't allocate any resources for it.
8177 if (!i && tg3_flag(tp, ENABLE_RSS))
8180 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8181 TG3_RX_RCB_RING_BYTES(tp),
8182 &tnapi->rx_rcb_mapping,
8183 GFP_KERNEL | __GFP_ZERO);
/* Error exit: release all RX resources allocated so far. */
8191 tg3_mem_rx_release(tp);
8196 * Must not be invoked with interrupt sources disabled and
8197 * the hardware shutdown down.
8199 static void tg3_free_consistent(struct tg3 *tp)
/* Tear down all DMA-coherent state: per-vector status blocks,
 * then RX and TX ring memory, then the shared hardware statistics
 * block. Counterpart of tg3_alloc_consistent().
 */
8203 for (i = 0; i < tp->irq_cnt; i++) {
8204 struct tg3_napi *tnapi = &tp->napi[i];
8206 if (tnapi->hw_status) {
8207 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8209 tnapi->status_mapping);
8210 tnapi->hw_status = NULL;
8214 tg3_mem_rx_release(tp);
8215 tg3_mem_tx_release(tp);
/* hw_stats NULL guard elided in this excerpt. */
8218 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8219 tp->hw_stats, tp->stats_mapping);
8220 tp->hw_stats = NULL;
8225 * Must not be invoked with interrupt sources disabled and
8226 * the hardware shutdown down. Can sleep.
8228 static int tg3_alloc_consistent(struct tg3 *tp)
/* Allocate all DMA-coherent memory: the hardware statistics block,
 * one status block per vector, then TX/RX ring memory. Also wires
 * up each vector's RX-return producer-index pointer into its status
 * block. Returns 0 on success; frees everything on failure.
 */
8232 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8233 sizeof(struct tg3_hw_stats),
8235 GFP_KERNEL | __GFP_ZERO);
8239 for (i = 0; i < tp->irq_cnt; i++) {
8240 struct tg3_napi *tnapi = &tp->napi[i];
8241 struct tg3_hw_status *sblk;
8243 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8245 &tnapi->status_mapping,
8246 GFP_KERNEL | __GFP_ZERO);
8247 if (!tnapi->hw_status)
8250 sblk = tnapi->hw_status;
8252 if (tg3_flag(tp, ENABLE_RSS)) {
8253 u16 *prodptr = NULL;
8256 * When RSS is enabled, the status block format changes
8257 * slightly. The "rx_jumbo_consumer", "reserved",
8258 * and "rx_mini_consumer" members get mapped to the
8259 * other three rx return ring producer indexes.
/* Per-vector selection (switch on i, elided in excerpt):
 * vector 1 -> idx[0].rx_producer, 2 -> rx_jumbo_consumer,
 * 3 -> reserved, 4 -> rx_mini_consumer.
 */
8263 prodptr = &sblk->idx[0].rx_producer;
8266 prodptr = &sblk->rx_jumbo_consumer;
8269 prodptr = &sblk->reserved;
8272 prodptr = &sblk->rx_mini_consumer;
8275 tnapi->rx_rcb_prod_idx = prodptr;
/* Non-RSS: the single return ring uses idx[0].rx_producer. */
8277 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8281 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
/* Error exit: undo all allocations. */
8287 tg3_free_consistent(tp);
/* Upper bound on poll iterations when waiting for an engine block
 * (or TX MAC) to report disabled.
 */
8291 #define MAX_WAIT_CNT 1000
8293 /* To stop a block, clear the enable bit and poll till it
8294 * clears. tp->lock is held.
8296 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
/* On 5705-plus parts several blocks cannot be individually
 * enabled/disabled; those cases return success immediately.
 * (The register write clearing the enable bit is elided in
 * this excerpt.) Returns 0 once the bit clears, nonzero on timeout.
 */
8301 if (tg3_flag(tp, 5705_PLUS)) {
8308 /* We can't enable/disable these bits of the
8309 * 5705/5750, just say success.
/* Poll until the enable bit reads back as clear. */
8322 for (i = 0; i < MAX_WAIT_CNT; i++) {
8325 if ((val & enable_bit) == 0)
8329 if (i == MAX_WAIT_CNT && !silent) {
8330 dev_err(&tp->pdev->dev,
8331 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8339 /* tp->lock is held. */
/* Quiesce the NIC: disable interrupts and the RX MAC, stop every
 * receive/send/DMA engine block in dependency order, disable the TX
 * MAC, reset the FTQ, and finally clear all status blocks. Timeouts
 * from tg3_stop_block() are OR-ed into err; with @silent they are
 * not logged. Returns 0 if everything stopped cleanly.
 */
8340 static int tg3_abort_hw(struct tg3 *tp, int silent)
8344 tg3_disable_ints(tp);
8346 tp->rx_mode &= ~RX_MODE_ENABLE;
8347 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side engine blocks first... */
8350 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8351 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8352 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8353 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8354 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8355 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
/* ...then the send-side and DMA engine blocks. */
8357 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8358 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8359 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8360 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8361 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8362 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8363 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8365 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8366 tw32_f(MAC_MODE, tp->mac_mode);
8369 tp->tx_mode &= ~TX_MODE_ENABLE;
8370 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* TX MAC disable is polled directly (no per-block stop helper). */
8372 for (i = 0; i < MAX_WAIT_CNT; i++) {
8374 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8377 if (i >= MAX_WAIT_CNT) {
8378 dev_err(&tp->pdev->dev,
8379 "%s timed out, TX_MODE_ENABLE will not clear "
8380 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8384 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8385 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8386 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through queue reset. */
8388 tw32(FTQ_RESET, 0xffffffff);
8389 tw32(FTQ_RESET, 0x00000000);
8391 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8392 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
/* Clear every vector's status block so stale events are dropped. */
8394 for (i = 0; i < tp->irq_cnt; i++) {
8395 struct tg3_napi *tnapi = &tp->napi[i];
8396 if (tnapi->hw_status)
8397 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8403 /* Save PCI command register before chip reset */
8404 static void tg3_save_pci_state(struct tg3 *tp)
/* Only PCI_COMMAND is saved here; the reset clears its memory-enable
 * bit on some chips (see comment in tg3_chip_reset).
 */
8406 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8409 /* Restore PCI state after chip reset */
/* Rebuild PCI config space that the GRC core-clock reset clobbered:
 * indirect-access control, PCISTATE (retry/APE access bits), the
 * saved PCI_COMMAND word, cacheline/latency on non-PCIe parts,
 * PCI-X relaxed ordering, and the MSI enable bit on 5780-class.
 */
8410 static void tg3_restore_pci_state(struct tg3 *tp)
8414 /* Re-enable indirect register accesses. */
8415 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8416 tp->misc_host_ctrl);
8418 /* Set MAX PCI retry to zero. */
8419 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8420 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8421 tg3_flag(tp, PCIX_MODE))
8422 val |= PCISTATE_RETRY_SAME_DMA;
8423 /* Allow reads and writes to the APE register and memory space. */
8424 if (tg3_flag(tp, ENABLE_APE))
8425 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8426 PCISTATE_ALLOW_APE_SHMEM_WR |
8427 PCISTATE_ALLOW_APE_PSPACE_WR;
8428 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8430 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
/* Legacy PCI/PCI-X only: PCIe keeps these across reset. */
8432 if (!tg3_flag(tp, PCI_EXPRESS)) {
8433 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8434 tp->pci_cacheline_sz);
8435 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8439 /* Make sure PCI-X relaxed ordering bit is clear. */
8440 if (tg3_flag(tp, PCIX_MODE)) {
8443 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8445 pcix_cmd &= ~PCI_X_CMD_ERO;
8446 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8450 if (tg3_flag(tp, 5780_CLASS)) {
8452 /* Chip reset on 5780 will reset MSI enable bit,
8453 * so need to restore it.
8455 if (tg3_flag(tp, USING_MSI)) {
8458 pci_read_config_word(tp->pdev,
8459 tp->msi_cap + PCI_MSI_FLAGS,
8461 pci_write_config_word(tp->pdev,
8462 tp->msi_cap + PCI_MSI_FLAGS,
8463 ctrl | PCI_MSI_FLAGS_ENABLE);
8464 val = tr32(MSGINT_MODE);
8465 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8470 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back
 * to a sane post-reset state: save PCI config, quiesce IRQ handlers,
 * issue the reset, restore PCI state, re-apply chip-specific
 * workarounds, wait for firmware, and re-probe ASF state.
 * Returns 0 on success or the error from tg3_poll_fw().
 * NOTE(review): this excerpt elides a number of original lines
 * (udelay()s, some writes, closing braces); comments below describe
 * only what is visible.
 */
8471 static int tg3_chip_reset(struct tg3 *tp)
8474 void (*write_op)(struct tg3 *, u32, u32);
8479 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8481 /* No matching tg3_nvram_unlock() after this because
8482 * chip reset below will undo the nvram lock.
8484 tp->nvram_lock_cnt = 0;
8486 /* GRC_MISC_CFG core clock reset will clear the memory
8487 * enable bit in PCI register 4 and the MSI enable bit
8488 * on some chips, so we save relevant registers here.
8490 tg3_save_pci_state(tp);
8492 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8493 tg3_flag(tp, 5755_PLUS))
8494 tw32(GRC_FASTBOOT_PC, 0);
8497 * We must avoid the readl() that normally takes place.
8498 * It locks machines, causes machine checks, and other
8499 * fun things. So, temporarily disable the 5701
8500 * hardware workaround, while we do the reset.
8502 write_op = tp->write32;
8503 if (write_op == tg3_write_flush_reg32)
8504 tp->write32 = tg3_write32;
8506 /* Prevent the irq handler from reading or writing PCI registers
8507 * during chip reset when the memory enable bit in the PCI command
8508 * register may be cleared. The chip does not generate interrupt
8509 * at this time, but the irq handler may still be called due to irq
8510 * sharing or irqpoll.
8512 tg3_flag_set(tp, CHIP_RESETTING);
8513 for (i = 0; i < tp->irq_cnt; i++) {
8514 struct tg3_napi *tnapi = &tp->napi[i];
8515 if (tnapi->hw_status) {
8516 tnapi->hw_status->status = 0;
8517 tnapi->hw_status->status_tag = 0;
8519 tnapi->last_tag = 0;
8520 tnapi->last_irq_tag = 0;
/* Wait for any in-flight IRQ handler on every vector to finish. */
8524 for (i = 0; i < tp->irq_cnt; i++)
8525 synchronize_irq(tp->napi[i].irq_vec);
8527 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8528 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8529 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the reset command; extra bits are OR-ed in below. */
8533 val = GRC_MISC_CFG_CORECLK_RESET;
8535 if (tg3_flag(tp, PCI_EXPRESS)) {
8536 /* Force PCIe 1.0a mode */
8537 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8538 !tg3_flag(tp, 57765_PLUS) &&
8539 tr32(TG3_PCIE_PHY_TSTCTL) ==
8540 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8541 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8543 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8544 tw32(GRC_MISC_CFG, (1 << 29));
8549 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8550 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8551 tw32(GRC_VCPU_EXT_CTRL,
8552 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8555 /* Manage gphy power for all CPMU absent PCIe devices. */
8556 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8557 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write actually triggers the core-clock reset. */
8559 tw32(GRC_MISC_CFG, val);
8561 /* restore 5701 hardware bug workaround write method */
8562 tp->write32 = write_op;
8564 /* Unfortunately, we have to delay before the PCI read back.
8565 * Some 575X chips even will not respond to a PCI cfg access
8566 * when the reset command is given to the chip.
8568 * How do these hardware designers expect things to work
8569 * properly if the PCI write is posted for a long period
8570 * of time? It is always necessary to have some method by
8571 * which a register read back can occur to push the write
8572 * out which does the reset.
8574 * For most tg3 variants the trick below was working.
8579 /* Flush PCI posted writes. The normal MMIO registers
8580 * are inaccessible at this time so this is the only
8581 * way to make this reliably (actually, this is no longer
8582 * the case, see above). I tried to use indirect
8583 * register read/write but this upset some 5701 variants.
8585 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8589 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8592 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8596 /* Wait for link training to complete. */
8597 for (j = 0; j < 5000; j++)
/* 5750 A0 erratum: set bit 15 in config reg 0xc4. */
8600 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8601 pci_write_config_dword(tp->pdev, 0xc4,
8602 cfg_val | (1 << 15));
8605 /* Clear the "no snoop" and "relaxed ordering" bits. */
8606 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8608 * Older PCIe devices only support the 128 byte
8609 * MPS setting. Enforce the restriction.
8611 if (!tg3_flag(tp, CPMU_PRESENT))
8612 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8613 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8615 /* Clear error status */
8616 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8617 PCI_EXP_DEVSTA_CED |
8618 PCI_EXP_DEVSTA_NFED |
8619 PCI_EXP_DEVSTA_FED |
8620 PCI_EXP_DEVSTA_URD);
8623 tg3_restore_pci_state(tp);
8625 tg3_flag_clear(tp, CHIP_RESETTING);
8626 tg3_flag_clear(tp, ERROR_PROCESSED);
/* Re-enable the memory arbiter on 5780-class parts. */
8629 if (tg3_flag(tp, 5780_CLASS))
8630 val = tr32(MEMARB_MODE);
8631 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8633 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8635 tw32(0x5000, 0x400);
8638 if (tg3_flag(tp, IS_SSB_CORE)) {
8640 * BCM4785: In order to avoid repercussions from using
8641 * potentially defective internal ROM, stop the Rx RISC CPU,
8642 * which is not required.
8645 tg3_halt_cpu(tp, RX_CPU_BASE);
8648 tw32(GRC_MODE, tp->grc_mode);
8650 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8653 tw32(0xc4, val | (1 << 15));
8656 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8657 tg3_asic_rev(tp) == ASIC_REV_5705) {
8658 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8659 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8660 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8661 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Select the MAC port mode matching the PHY attachment. */
8664 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8665 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8667 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8668 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8673 tw32_f(MAC_MODE, val);
8676 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
/* Wait for bootcode/firmware to finish its post-reset init. */
8678 err = tg3_poll_fw(tp);
8684 if (tg3_flag(tp, PCI_EXPRESS) &&
8685 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8686 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8687 !tg3_flag(tp, 57765_PLUS)) {
8690 tw32(0x7c00, val | (1 << 25));
8693 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8694 val = tr32(TG3_CPMU_CLCK_ORIDE);
8695 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8698 /* Reprobe ASF enable state. */
8699 tg3_flag_clear(tp, ENABLE_ASF);
8700 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8701 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8702 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8705 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8706 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8707 tg3_flag_set(tp, ENABLE_ASF);
8708 tp->last_event_jiffies = jiffies;
8709 if (tg3_flag(tp, 5750_PLUS))
8710 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
/* Forward declarations: stats snapshot helpers used by tg3_halt(). */
8717 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8718 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8720 /* tp->lock is held. */
/* Stop the chip for @kind (reset kind), preserving statistics across
 * the reset: signal pre-reset, abort the hardware, chip-reset,
 * restore the MAC address, signal post-reset, then snapshot
 * net/ethtool stats into *_prev and zero the live hw_stats block.
 * Returns the tg3_chip_reset() result (return lines elided here).
 */
8721 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8727 tg3_write_sig_pre_reset(tp, kind);
8729 tg3_abort_hw(tp, silent);
8730 err = tg3_chip_reset(tp);
8732 __tg3_set_mac_addr(tp, 0);
8734 tg3_write_sig_legacy(tp, kind);
8735 tg3_write_sig_post_reset(tp, kind);
8738 /* Save the stats across chip resets... */
8739 tg3_get_nstats(tp, &tp->net_stats_prev);
8740 tg3_get_estats(tp, &tp->estats_prev);
8742 /* And make sure the next sample is new data */
8743 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* ndo_set_mac_address handler. Validates and stores the new address;
 * if the interface is running, programs it into the MAC address
 * registers under tp->lock. When ASF firmware appears to be using
 * MAC address slot 1, that slot is left untouched (skip_mac_1).
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
8752 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8754 struct tg3 *tp = netdev_priv(dev);
8755 struct sockaddr *addr = p;
8756 int err = 0, skip_mac_1 = 0;
8758 if (!is_valid_ether_addr(addr->sa_data))
8759 return -EADDRNOTAVAIL;
8761 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* Device down: nothing to program yet. */
8763 if (!netif_running(dev))
8766 if (tg3_flag(tp, ENABLE_ASF)) {
8767 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8769 addr0_high = tr32(MAC_ADDR_0_HIGH);
8770 addr0_low = tr32(MAC_ADDR_0_LOW);
8771 addr1_high = tr32(MAC_ADDR_1_HIGH);
8772 addr1_low = tr32(MAC_ADDR_1_LOW);
8774 /* Skip MAC addr 1 if ASF is using it. */
8775 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8776 !(addr1_high == 0 && addr1_low == 0))
8779 spin_lock_bh(&tp->lock);
8780 __tg3_set_mac_addr(tp, skip_mac_1);
8781 spin_unlock_bh(&tp->lock);
8786 /* tp->lock is held. */
/* Program one BD-info structure in NIC SRAM: the 64-bit host DMA
 * address of the ring, its maxlen/flags word, and — on pre-5705
 * chips only — the NIC-local ring address.
 */
8787 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8788 dma_addr_t mapping, u32 maxlen_flags,
8792 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8793 ((u64) mapping >> 32))
8795 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8796 ((u64) mapping & 0xffffffff));
8798 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705-plus chips have no NIC-side ring address to program. */
8801 if (!tg3_flag(tp, 5705_PLUS))
8803 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Program TX interrupt-coalescing parameters from @ec. Without TSS,
 * the default (vector-0) HOSTCC registers are written; with TSS they
 * are zeroed and the per-vector VEC1+ register strides (0x18 apart)
 * carry the values instead. Remaining vectors are zeroed.
 */
8808 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8812 if (!tg3_flag(tp, ENABLE_TSS)) {
8813 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8814 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8815 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
/* TSS: vector 0 handles no TX, so its registers are cleared. */
8817 tw32(HOSTCC_TXCOL_TICKS, 0);
8818 tw32(HOSTCC_TXMAX_FRAMES, 0);
8819 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8821 for (; i < tp->txq_cnt; i++) {
8824 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8825 tw32(reg, ec->tx_coalesce_usecs);
8826 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8827 tw32(reg, ec->tx_max_coalesced_frames);
8828 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8829 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the unused trailing vectors. */
8833 for (; i < tp->irq_max - 1; i++) {
8834 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8835 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8836 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Program RX interrupt-coalescing parameters from @ec — the RX
 * mirror of tg3_coal_tx_init(), keyed on ENABLE_RSS instead of
 * ENABLE_TSS, with the same VEC1 + i*0x18 register striding.
 */
8840 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8843 u32 limit = tp->rxq_cnt;
8845 if (!tg3_flag(tp, ENABLE_RSS)) {
8846 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8847 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8848 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
/* RSS: vector 0 handles no RX, so its registers are cleared. */
8851 tw32(HOSTCC_RXCOL_TICKS, 0);
8852 tw32(HOSTCC_RXMAX_FRAMES, 0);
8853 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8856 for (; i < limit; i++) {
8859 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8860 tw32(reg, ec->rx_coalesce_usecs);
8861 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8862 tw32(reg, ec->rx_max_coalesced_frames);
8863 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8864 tw32(reg, ec->rx_max_coalesced_frames_irq);
/* Zero the unused trailing vectors. */
8867 for (; i < tp->irq_max - 1; i++) {
8868 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8869 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8870 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Apply all coalescing settings from @ec: TX side, RX side, and —
 * on pre-5705 chips — the per-IRQ tick and statistics-block
 * coalescing registers.
 */
8874 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8876 tg3_coal_tx_init(tp, ec);
8877 tg3_coal_rx_init(tp, ec);
8879 if (!tg3_flag(tp, 5705_PLUS)) {
8880 u32 val = ec->stats_block_coalesce_usecs;
8882 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8883 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
/* val may be forced to 0 when the link is down (lines elided). */
8888 tw32(HOSTCC_STAT_COAL_TICKS, val);
8892 /* tp->lock is held. */
/* Reset all NIC ring state: disable every send and receive-return
 * ring but the first (count depends on chip family), clear mailbox
 * registers and per-vector software counters, clear status blocks,
 * and re-program the status-block DMA address plus the TX / RX-return
 * BD-info entries for each active vector.
 */
8893 static void tg3_rings_reset(struct tg3 *tp)
8896 u32 stblk, txrcb, rxrcb, limit;
8897 struct tg3_napi *tnapi = &tp->napi[0];
8899 /* Disable all transmit rings but the first. */
8900 if (!tg3_flag(tp, 5705_PLUS))
8901 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8902 else if (tg3_flag(tp, 5717_PLUS))
8903 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8904 else if (tg3_flag(tp, 57765_CLASS) ||
8905 tg3_asic_rev(tp) == ASIC_REV_5762)
8906 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8908 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8910 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8911 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8912 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8913 BDINFO_FLAGS_DISABLED);
8916 /* Disable all receive return rings but the first. */
8917 if (tg3_flag(tp, 5717_PLUS))
8918 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8919 else if (!tg3_flag(tp, 5705_PLUS))
8920 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8921 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8922 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8923 tg3_flag(tp, 57765_CLASS))
8924 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8926 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8928 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8929 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8930 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8931 BDINFO_FLAGS_DISABLED);
8933 /* Disable interrupts */
8934 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8935 tp->napi[0].chk_msi_cnt = 0;
8936 tp->napi[0].last_rx_cons = 0;
8937 tp->napi[0].last_tx_cons = 0;
8939 /* Zero mailbox registers. */
8940 if (tg3_flag(tp, SUPPORT_MSIX)) {
8941 for (i = 1; i < tp->irq_max; i++) {
8942 tp->napi[i].tx_prod = 0;
8943 tp->napi[i].tx_cons = 0;
8944 if (tg3_flag(tp, ENABLE_TSS))
8945 tw32_mailbox(tp->napi[i].prodmbox, 0);
8946 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8947 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8948 tp->napi[i].chk_msi_cnt = 0;
8949 tp->napi[i].last_rx_cons = 0;
8950 tp->napi[i].last_tx_cons = 0;
/* Without TSS, only vector 0's producer mailbox exists. */
8952 if (!tg3_flag(tp, ENABLE_TSS))
8953 tw32_mailbox(tp->napi[0].prodmbox, 0);
8955 tp->napi[0].tx_prod = 0;
8956 tp->napi[0].tx_cons = 0;
8957 tw32_mailbox(tp->napi[0].prodmbox, 0);
8958 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8961 /* Make sure the NIC-based send BD rings are disabled. */
8962 if (!tg3_flag(tp, 5705_PLUS)) {
8963 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8964 for (i = 0; i < 16; i++)
8965 tw32_tx_mbox(mbox + i * 8, 0);
8968 txrcb = NIC_SRAM_SEND_RCB;
8969 rxrcb = NIC_SRAM_RCV_RET_RCB;
8971 /* Clear status block in ram. */
8972 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8974 /* Set status block DMA address */
8975 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8976 ((u64) tnapi->status_mapping >> 32));
8977 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8978 ((u64) tnapi->status_mapping & 0xffffffff));
/* Vector 0: program its TX and RX-return BD-info entries. */
8980 if (tnapi->tx_ring) {
8981 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8982 (TG3_TX_RING_SIZE <<
8983 BDINFO_FLAGS_MAXLEN_SHIFT),
8984 NIC_SRAM_TX_BUFFER_DESC);
8985 txrcb += TG3_BDINFO_SIZE;
8988 if (tnapi->rx_rcb) {
8989 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8990 (tp->rx_ret_ring_mask + 1) <<
8991 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8992 rxrcb += TG3_BDINFO_SIZE;
/* Remaining vectors: per-vector status block registers start at
 * HOSTCC_STATBLCK_RING1 (stblk stride elided in this excerpt).
 */
8995 stblk = HOSTCC_STATBLCK_RING1;
8997 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8998 u64 mapping = (u64)tnapi->status_mapping;
8999 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9000 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9002 /* Clear status block in ram. */
9003 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9005 if (tnapi->tx_ring) {
9006 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9007 (TG3_TX_RING_SIZE <<
9008 BDINFO_FLAGS_MAXLEN_SHIFT),
9009 NIC_SRAM_TX_BUFFER_DESC);
9010 txrcb += TG3_BDINFO_SIZE;
9013 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9014 ((tp->rx_ret_ring_mask + 1) <<
9015 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9018 rxrcb += TG3_BDINFO_SIZE;
/* Configure RX buffer-descriptor replenish thresholds. The on-chip
 * BD cache size depends on chip family; the std (and, on jumbo-
 * capable non-5780 chips, jumbo) replenish thresholds are the
 * smaller of half the cache and 1/8 of the configured ring depth.
 * 57765-plus parts also get a low-water-mark register.
 */
9022 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9024 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9026 if (!tg3_flag(tp, 5750_PLUS) ||
9027 tg3_flag(tp, 5780_CLASS) ||
9028 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9029 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9030 tg3_flag(tp, 57765_PLUS))
9031 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9032 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9033 tg3_asic_rev(tp) == ASIC_REV_5787)
9034 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9036 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9038 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9039 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9041 val = min(nic_rep_thresh, host_rep_thresh);
9042 tw32(RCVBDI_STD_THRESH, val);
9044 if (tg3_flag(tp, 57765_PLUS))
9045 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* No native jumbo ring on non-jumbo or 5780-class chips — done. */
9047 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9050 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9052 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9054 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9055 tw32(RCVBDI_JUMBO_THRESH, val);
9057 if (tg3_flag(tp, 57765_PLUS))
9058 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* Bitwise CRC over @buf (used below to hash multicast addresses for
 * the MAC hash registers). NOTE(review): the loop bodies and return
 * are elided in this excerpt; only the loop skeleton is visible.
 */
9061 static inline u32 calc_crc(unsigned char *buf, int len)
9069 for (j = 0; j < len; j++) {
9072 for (k = 0; k < 8; k++) {
/* Set the four MAC multicast hash registers to all-ones
 * (accept every multicast frame) or all-zeros (reject all).
 */
9085 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9087 /* accept or reject all multicast frames */
9088 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9089 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9090 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9091 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute and program the RX filtering mode from dev->flags:
 * promiscuous, all-multicast, no-multicast, or a CRC-hash multicast
 * filter built from the device's mc list. Only touches MAC_RX_MODE
 * when the computed mode differs from the cached tp->rx_mode.
 */
9094 static void __tg3_set_rx_mode(struct net_device *dev)
9096 struct tg3 *tp = netdev_priv(dev);
9099 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9100 RX_MODE_KEEP_VLAN_TAG);
9102 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9103 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9106 if (!tg3_flag(tp, ENABLE_ASF))
9107 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9110 if (dev->flags & IFF_PROMISC) {
9111 /* Promiscuous mode. */
9112 rx_mode |= RX_MODE_PROMISC;
9113 } else if (dev->flags & IFF_ALLMULTI) {
9114 /* Accept all multicast. */
9115 tg3_set_multi(tp, 1);
9116 } else if (netdev_mc_empty(dev)) {
9117 /* Reject all multicast. */
9118 tg3_set_multi(tp, 0);
9120 /* Accept one or more multicast(s). */
9121 struct netdev_hw_addr *ha;
9122 u32 mc_filter[4] = { 0, };
9127 netdev_for_each_mc_addr(ha, dev) {
9128 crc = calc_crc(ha->addr, ETH_ALEN);
/* Top CRC bits select register (regidx) and bit within it
 * (bit-extraction line elided in this excerpt).
 */
9130 regidx = (bit & 0x60) >> 5;
9132 mc_filter[regidx] |= (1 << bit);
9135 tw32(MAC_HASH_REG_0, mc_filter[0]);
9136 tw32(MAC_HASH_REG_1, mc_filter[1]);
9137 tw32(MAC_HASH_REG_2, mc_filter[2]);
9138 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Avoid a register write when nothing changed. */
9141 if (rx_mode != tp->rx_mode) {
9142 tp->rx_mode = rx_mode;
9143 tw32_f(MAC_RX_MODE, rx_mode);
/* Fill the RSS indirection table with the ethtool default
 * round-robin spread across @qcnt RX queues.
 */
9148 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9152 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9153 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
/* Validate the RSS indirection table against the current RX queue
 * count: no-op without MSI-X; zero the table for a single queue;
 * otherwise rebuild the default table if any entry points at a
 * queue index that no longer exists.
 */
9156 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9160 if (!tg3_flag(tp, SUPPORT_MSIX))
9163 if (tp->rxq_cnt == 1) {
9164 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9168 /* Validate table against current IRQ count */
9169 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9170 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
/* Early loop exit above means an out-of-range entry was found. */
9174 if (i != TG3_RSS_INDIR_TBL_SIZE)
9175 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
/* Write the software indirection table into the MAC RSS registers,
 * packing 8 table entries per 32-bit register write.
 * (Shift and tw32 lines are elided in this excerpt.)
 */
9178 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9181 u32 reg = MAC_RSS_INDIR_TBL_0;
9183 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9184 u32 val = tp->rss_ind_tbl[i];
9186 for (; i % 8; i++) {
9188 val |= tp->rss_ind_tbl[i];
9195 /* tp->lock is held. */
9196 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9198 u32 val, rdmac_mode;
9200 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9202 tg3_disable_ints(tp);
9206 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9208 if (tg3_flag(tp, INIT_COMPLETE))
9209 tg3_abort_hw(tp, 1);
9211 /* Enable MAC control of LPI */
9212 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9213 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9214 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9215 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9216 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9218 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9220 tw32_f(TG3_CPMU_EEE_CTRL,
9221 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9223 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9224 TG3_CPMU_EEEMD_LPI_IN_TX |
9225 TG3_CPMU_EEEMD_LPI_IN_RX |
9226 TG3_CPMU_EEEMD_EEE_ENABLE;
9228 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9229 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9231 if (tg3_flag(tp, ENABLE_APE))
9232 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9234 tw32_f(TG3_CPMU_EEE_MODE, val);
9236 tw32_f(TG3_CPMU_EEE_DBTMR1,
9237 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9238 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9240 tw32_f(TG3_CPMU_EEE_DBTMR2,
9241 TG3_CPMU_DBTMR2_APE_TX_2047US |
9242 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9248 err = tg3_chip_reset(tp);
9252 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9254 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9255 val = tr32(TG3_CPMU_CTRL);
9256 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9257 tw32(TG3_CPMU_CTRL, val);
9259 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9260 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9261 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9262 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9264 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9265 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9266 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9267 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9269 val = tr32(TG3_CPMU_HST_ACC);
9270 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9271 val |= CPMU_HST_ACC_MACCLK_6_25;
9272 tw32(TG3_CPMU_HST_ACC, val);
9275 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9276 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9277 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9278 PCIE_PWR_MGMT_L1_THRESH_4MS;
9279 tw32(PCIE_PWR_MGMT_THRESH, val);
9281 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9282 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9284 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9286 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9287 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9290 if (tg3_flag(tp, L1PLLPD_EN)) {
9291 u32 grc_mode = tr32(GRC_MODE);
9293 /* Access the lower 1K of PL PCIE block registers. */
9294 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9295 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9297 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9298 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9299 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9301 tw32(GRC_MODE, grc_mode);
9304 if (tg3_flag(tp, 57765_CLASS)) {
9305 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9306 u32 grc_mode = tr32(GRC_MODE);
9308 /* Access the lower 1K of PL PCIE block registers. */
9309 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9310 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9312 val = tr32(TG3_PCIE_TLDLPL_PORT +
9313 TG3_PCIE_PL_LO_PHYCTL5);
9314 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9315 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9317 tw32(GRC_MODE, grc_mode);
9320 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9323 /* Fix transmit hangs */
9324 val = tr32(TG3_CPMU_PADRNG_CTL);
9325 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9326 tw32(TG3_CPMU_PADRNG_CTL, val);
9328 grc_mode = tr32(GRC_MODE);
9330 /* Access the lower 1K of DL PCIE block registers. */
9331 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9332 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9334 val = tr32(TG3_PCIE_TLDLPL_PORT +
9335 TG3_PCIE_DL_LO_FTSMAX);
9336 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9337 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9338 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9340 tw32(GRC_MODE, grc_mode);
9343 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9344 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9345 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9346 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9349 /* This works around an issue with Athlon chipsets on
9350 * B3 tigon3 silicon. This bit has no effect on any
9351 * other revision. But do not set this on PCI Express
9352 * chips and don't even touch the clocks if the CPMU is present.
9354 if (!tg3_flag(tp, CPMU_PRESENT)) {
9355 if (!tg3_flag(tp, PCI_EXPRESS))
9356 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9357 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9360 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9361 tg3_flag(tp, PCIX_MODE)) {
9362 val = tr32(TG3PCI_PCISTATE);
9363 val |= PCISTATE_RETRY_SAME_DMA;
9364 tw32(TG3PCI_PCISTATE, val);
9367 if (tg3_flag(tp, ENABLE_APE)) {
9368 /* Allow reads and writes to the
9369 * APE register and memory space.
9371 val = tr32(TG3PCI_PCISTATE);
9372 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9373 PCISTATE_ALLOW_APE_SHMEM_WR |
9374 PCISTATE_ALLOW_APE_PSPACE_WR;
9375 tw32(TG3PCI_PCISTATE, val);
9378 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9379 /* Enable some hw fixes. */
9380 val = tr32(TG3PCI_MSI_DATA);
9381 val |= (1 << 26) | (1 << 28) | (1 << 29);
9382 tw32(TG3PCI_MSI_DATA, val);
9385 /* Descriptor ring init may make accesses to the
9386 * NIC SRAM area to setup the TX descriptors, so we
9387 * can only do this after the hardware has been
9388 * successfully reset.
9390 err = tg3_init_rings(tp);
9394 if (tg3_flag(tp, 57765_PLUS)) {
9395 val = tr32(TG3PCI_DMA_RW_CTRL) &
9396 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9397 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9398 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9399 if (!tg3_flag(tp, 57765_CLASS) &&
9400 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9401 tg3_asic_rev(tp) != ASIC_REV_5762)
9402 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9403 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9404 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9405 tg3_asic_rev(tp) != ASIC_REV_5761) {
9406 /* This value is determined during the probe time DMA
9407 * engine test, tg3_test_dma.
9409 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9412 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9413 GRC_MODE_4X_NIC_SEND_RINGS |
9414 GRC_MODE_NO_TX_PHDR_CSUM |
9415 GRC_MODE_NO_RX_PHDR_CSUM);
9416 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9418 /* Pseudo-header checksum is done by hardware logic and not
9419 * the offload processers, so make the chip do the pseudo-
9420 * header checksums on receive. For transmit it is more
9421 * convenient to do the pseudo-header checksum in software
9422 * as Linux does that on transmit for us in all cases.
9424 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9426 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9428 tw32(TG3_RX_PTP_CTL,
9429 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9431 if (tg3_flag(tp, PTP_CAPABLE))
9432 val |= GRC_MODE_TIME_SYNC_ENABLE;
9434 tw32(GRC_MODE, tp->grc_mode | val);
9436 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9437 val = tr32(GRC_MISC_CFG);
9439 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9440 tw32(GRC_MISC_CFG, val);
9442 /* Initialize MBUF/DESC pool. */
9443 if (tg3_flag(tp, 5750_PLUS)) {
9445 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9446 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9447 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9448 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9450 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9451 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9452 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9453 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9456 fw_len = tp->fw_len;
9457 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9458 tw32(BUFMGR_MB_POOL_ADDR,
9459 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9460 tw32(BUFMGR_MB_POOL_SIZE,
9461 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9464 if (tp->dev->mtu <= ETH_DATA_LEN) {
9465 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9466 tp->bufmgr_config.mbuf_read_dma_low_water);
9467 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9468 tp->bufmgr_config.mbuf_mac_rx_low_water);
9469 tw32(BUFMGR_MB_HIGH_WATER,
9470 tp->bufmgr_config.mbuf_high_water);
9472 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9473 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9474 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9475 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9476 tw32(BUFMGR_MB_HIGH_WATER,
9477 tp->bufmgr_config.mbuf_high_water_jumbo);
9479 tw32(BUFMGR_DMA_LOW_WATER,
9480 tp->bufmgr_config.dma_low_water);
9481 tw32(BUFMGR_DMA_HIGH_WATER,
9482 tp->bufmgr_config.dma_high_water);
9484 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9485 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9486 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9487 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9488 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9489 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9490 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9491 tw32(BUFMGR_MODE, val);
9492 for (i = 0; i < 2000; i++) {
9493 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9498 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9502 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9503 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9505 tg3_setup_rxbd_thresholds(tp);
9507 /* Initialize TG3_BDINFO's at:
9508 * RCVDBDI_STD_BD: standard eth size rx ring
9509 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9510 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9513 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9514 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9515 * ring attribute flags
9516 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9518 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9519 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9521 * The size of each ring is fixed in the firmware, but the location is
9524 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9525 ((u64) tpr->rx_std_mapping >> 32));
9526 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9527 ((u64) tpr->rx_std_mapping & 0xffffffff));
9528 if (!tg3_flag(tp, 5717_PLUS))
9529 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9530 NIC_SRAM_RX_BUFFER_DESC);
9532 /* Disable the mini ring */
9533 if (!tg3_flag(tp, 5705_PLUS))
9534 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9535 BDINFO_FLAGS_DISABLED);
9537 /* Program the jumbo buffer descriptor ring control
9538 * blocks on those devices that have them.
9540 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9541 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9543 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9544 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9545 ((u64) tpr->rx_jmb_mapping >> 32));
9546 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9547 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9548 val = TG3_RX_JMB_RING_SIZE(tp) <<
9549 BDINFO_FLAGS_MAXLEN_SHIFT;
9550 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9551 val | BDINFO_FLAGS_USE_EXT_RECV);
9552 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9553 tg3_flag(tp, 57765_CLASS) ||
9554 tg3_asic_rev(tp) == ASIC_REV_5762)
9555 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9556 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9558 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9559 BDINFO_FLAGS_DISABLED);
9562 if (tg3_flag(tp, 57765_PLUS)) {
9563 val = TG3_RX_STD_RING_SIZE(tp);
9564 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9565 val |= (TG3_RX_STD_DMA_SZ << 2);
9567 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9569 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9571 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9573 tpr->rx_std_prod_idx = tp->rx_pending;
9574 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9576 tpr->rx_jmb_prod_idx =
9577 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9578 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9580 tg3_rings_reset(tp);
9582 /* Initialize MAC address and backoff seed. */
9583 __tg3_set_mac_addr(tp, 0);
9585 /* MTU + ethernet header + FCS + optional VLAN tag */
9586 tw32(MAC_RX_MTU_SIZE,
9587 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9589 /* The slot time is changed by tg3_setup_phy if we
9590 * run at gigabit with half duplex.
9592 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9593 (6 << TX_LENGTHS_IPG_SHIFT) |
9594 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9596 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9597 tg3_asic_rev(tp) == ASIC_REV_5762)
9598 val |= tr32(MAC_TX_LENGTHS) &
9599 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9600 TX_LENGTHS_CNT_DWN_VAL_MSK);
9602 tw32(MAC_TX_LENGTHS, val);
9604 /* Receive rules. */
9605 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9606 tw32(RCVLPC_CONFIG, 0x0181);
9608 /* Calculate RDMAC_MODE setting early, we need it to determine
9609 * the RCVLPC_STATE_ENABLE mask.
9611 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9612 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9613 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9614 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9615 RDMAC_MODE_LNGREAD_ENAB);
9617 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9618 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9620 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9621 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9622 tg3_asic_rev(tp) == ASIC_REV_57780)
9623 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9624 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9625 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9627 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9628 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9629 if (tg3_flag(tp, TSO_CAPABLE) &&
9630 tg3_asic_rev(tp) == ASIC_REV_5705) {
9631 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9632 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9633 !tg3_flag(tp, IS_5788)) {
9634 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9638 if (tg3_flag(tp, PCI_EXPRESS))
9639 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9641 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9643 if (tp->dev->mtu <= ETH_DATA_LEN) {
9644 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9645 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9649 if (tg3_flag(tp, HW_TSO_1) ||
9650 tg3_flag(tp, HW_TSO_2) ||
9651 tg3_flag(tp, HW_TSO_3))
9652 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9654 if (tg3_flag(tp, 57765_PLUS) ||
9655 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9656 tg3_asic_rev(tp) == ASIC_REV_57780)
9657 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9659 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9660 tg3_asic_rev(tp) == ASIC_REV_5762)
9661 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9663 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9664 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9665 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9666 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9667 tg3_flag(tp, 57765_PLUS)) {
9670 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9671 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9673 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9676 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9677 tg3_asic_rev(tp) == ASIC_REV_5762) {
9678 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9679 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9680 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9681 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9682 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9683 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9685 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9688 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9689 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9690 tg3_asic_rev(tp) == ASIC_REV_5762) {
9693 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9694 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9696 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9700 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9701 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9704 /* Receive/send statistics. */
9705 if (tg3_flag(tp, 5750_PLUS)) {
9706 val = tr32(RCVLPC_STATS_ENABLE);
9707 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9708 tw32(RCVLPC_STATS_ENABLE, val);
9709 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9710 tg3_flag(tp, TSO_CAPABLE)) {
9711 val = tr32(RCVLPC_STATS_ENABLE);
9712 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9713 tw32(RCVLPC_STATS_ENABLE, val);
9715 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9717 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9718 tw32(SNDDATAI_STATSENAB, 0xffffff);
9719 tw32(SNDDATAI_STATSCTRL,
9720 (SNDDATAI_SCTRL_ENABLE |
9721 SNDDATAI_SCTRL_FASTUPD));
9723 /* Setup host coalescing engine. */
9724 tw32(HOSTCC_MODE, 0);
9725 for (i = 0; i < 2000; i++) {
9726 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9731 __tg3_set_coalesce(tp, &tp->coal);
9733 if (!tg3_flag(tp, 5705_PLUS)) {
9734 /* Status/statistics block address. See tg3_timer,
9735 * the tg3_periodic_fetch_stats call there, and
9736 * tg3_get_stats to see how this works for 5705/5750 chips.
9738 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9739 ((u64) tp->stats_mapping >> 32));
9740 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9741 ((u64) tp->stats_mapping & 0xffffffff));
9742 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9744 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9746 /* Clear statistics and status block memory areas */
9747 for (i = NIC_SRAM_STATS_BLK;
9748 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9750 tg3_write_mem(tp, i, 0);
9755 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9757 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9758 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9759 if (!tg3_flag(tp, 5705_PLUS))
9760 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9762 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9763 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9764 /* reset to prevent losing 1st rx packet intermittently */
9765 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9769 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9770 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9771 MAC_MODE_FHDE_ENABLE;
9772 if (tg3_flag(tp, ENABLE_APE))
9773 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9774 if (!tg3_flag(tp, 5705_PLUS) &&
9775 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9776 tg3_asic_rev(tp) != ASIC_REV_5700)
9777 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9778 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9781 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9782 * If TG3_FLAG_IS_NIC is zero, we should read the
9783 * register to preserve the GPIO settings for LOMs. The GPIOs,
9784 * whether used as inputs or outputs, are set by boot code after
9787 if (!tg3_flag(tp, IS_NIC)) {
9790 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9791 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9792 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9794 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9795 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9796 GRC_LCLCTRL_GPIO_OUTPUT3;
9798 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9799 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9801 tp->grc_local_ctrl &= ~gpio_mask;
9802 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9804 /* GPIO1 must be driven high for eeprom write protect */
9805 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9806 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9807 GRC_LCLCTRL_GPIO_OUTPUT1);
9809 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9812 if (tg3_flag(tp, USING_MSIX)) {
9813 val = tr32(MSGINT_MODE);
9814 val |= MSGINT_MODE_ENABLE;
9815 if (tp->irq_cnt > 1)
9816 val |= MSGINT_MODE_MULTIVEC_EN;
9817 if (!tg3_flag(tp, 1SHOT_MSI))
9818 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9819 tw32(MSGINT_MODE, val);
9822 if (!tg3_flag(tp, 5705_PLUS)) {
9823 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9827 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9828 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9829 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9830 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9831 WDMAC_MODE_LNGREAD_ENAB);
9833 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9834 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9835 if (tg3_flag(tp, TSO_CAPABLE) &&
9836 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9837 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9839 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9840 !tg3_flag(tp, IS_5788)) {
9841 val |= WDMAC_MODE_RX_ACCEL;
9845 /* Enable host coalescing bug fix */
9846 if (tg3_flag(tp, 5755_PLUS))
9847 val |= WDMAC_MODE_STATUS_TAG_FIX;
9849 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9850 val |= WDMAC_MODE_BURST_ALL_DATA;
9852 tw32_f(WDMAC_MODE, val);
9855 if (tg3_flag(tp, PCIX_MODE)) {
9858 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9860 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9861 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9862 pcix_cmd |= PCI_X_CMD_READ_2K;
9863 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9864 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9865 pcix_cmd |= PCI_X_CMD_READ_2K;
9867 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9871 tw32_f(RDMAC_MODE, rdmac_mode);
9874 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9875 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9876 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9879 if (i < TG3_NUM_RDMA_CHANNELS) {
9880 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9881 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9882 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9883 tg3_flag_set(tp, 5719_RDMA_BUG);
9887 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9888 if (!tg3_flag(tp, 5705_PLUS))
9889 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9891 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9893 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9895 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9897 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9898 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9899 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9900 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9901 val |= RCVDBDI_MODE_LRG_RING_SZ;
9902 tw32(RCVDBDI_MODE, val);
9903 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9904 if (tg3_flag(tp, HW_TSO_1) ||
9905 tg3_flag(tp, HW_TSO_2) ||
9906 tg3_flag(tp, HW_TSO_3))
9907 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9908 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9909 if (tg3_flag(tp, ENABLE_TSS))
9910 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9911 tw32(SNDBDI_MODE, val);
9912 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9914 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9915 err = tg3_load_5701_a0_firmware_fix(tp);
9920 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9921 /* Ignore any errors for the firmware download. If download
9922 * fails, the device will operate with EEE disabled
9924 tg3_load_57766_firmware(tp);
9927 if (tg3_flag(tp, TSO_CAPABLE)) {
9928 err = tg3_load_tso_firmware(tp);
9933 tp->tx_mode = TX_MODE_ENABLE;
9935 if (tg3_flag(tp, 5755_PLUS) ||
9936 tg3_asic_rev(tp) == ASIC_REV_5906)
9937 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9939 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9940 tg3_asic_rev(tp) == ASIC_REV_5762) {
9941 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9942 tp->tx_mode &= ~val;
9943 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9946 tw32_f(MAC_TX_MODE, tp->tx_mode);
9949 if (tg3_flag(tp, ENABLE_RSS)) {
9950 tg3_rss_write_indir_tbl(tp);
9952 /* Setup the "secret" hash key. */
9953 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9954 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9955 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9956 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9957 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9958 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9959 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9960 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9961 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9962 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9965 tp->rx_mode = RX_MODE_ENABLE;
9966 if (tg3_flag(tp, 5755_PLUS))
9967 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9969 if (tg3_flag(tp, ENABLE_RSS))
9970 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9971 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9972 RX_MODE_RSS_IPV6_HASH_EN |
9973 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9974 RX_MODE_RSS_IPV4_HASH_EN |
9975 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9977 tw32_f(MAC_RX_MODE, tp->rx_mode);
9980 tw32(MAC_LED_CTRL, tp->led_ctrl);
9982 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9983 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9984 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9987 tw32_f(MAC_RX_MODE, tp->rx_mode);
9990 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9991 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9992 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9993 /* Set drive transmission level to 1.2V */
9994 /* only if the signal pre-emphasis bit is not set */
9995 val = tr32(MAC_SERDES_CFG);
9998 tw32(MAC_SERDES_CFG, val);
10000 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10001 tw32(MAC_SERDES_CFG, 0x616000);
10004 /* Prevent chip from dropping frames when flow control
10007 if (tg3_flag(tp, 57765_CLASS))
10011 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10013 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10014 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10015 /* Use hardware link auto-negotiation */
10016 tg3_flag_set(tp, HW_AUTONEG);
10019 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10020 tg3_asic_rev(tp) == ASIC_REV_5714) {
10023 tmp = tr32(SERDES_RX_CTRL);
10024 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10025 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10026 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10027 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10030 if (!tg3_flag(tp, USE_PHYLIB)) {
10031 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10032 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10034 err = tg3_setup_phy(tp, 0);
10038 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10039 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10042 /* Clear CRC stats. */
10043 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10044 tg3_writephy(tp, MII_TG3_TEST1,
10045 tmp | MII_TG3_TEST1_CRC_EN);
10046 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10051 __tg3_set_rx_mode(tp->dev);
10053 /* Initialize receive rules. */
10054 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10055 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10056 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10057 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10059 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10063 if (tg3_flag(tp, ENABLE_ASF))
10067 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10069 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10071 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10073 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10075 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10077 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10079 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10081 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10083 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10085 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10087 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10089 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10091 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10093 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10101 if (tg3_flag(tp, ENABLE_APE))
10102 /* Write our heartbeat update interval to APE. */
10103 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10104 APE_HOST_HEARTBEAT_INT_DISABLE);
10106 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10111 /* Called at device open time to get the chip ready for
10112 * packet processing. Invoked with tp->lock held.
10114 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10116 tg3_switch_clocks(tp);
10118 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10120 return tg3_reset_hw(tp, reset_phy);
10123 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10127 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10128 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10130 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10133 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10134 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10135 memset(ocir, 0, TG3_OCIR_LEN);
10139 /* sysfs attributes for hwmon */
10140 static ssize_t tg3_show_temp(struct device *dev,
10141 struct device_attribute *devattr, char *buf)
10143 struct pci_dev *pdev = to_pci_dev(dev);
10144 struct net_device *netdev = pci_get_drvdata(pdev);
10145 struct tg3 *tp = netdev_priv(netdev);
10146 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10149 spin_lock_bh(&tp->lock);
10150 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10151 sizeof(temperature));
10152 spin_unlock_bh(&tp->lock);
10153 return sprintf(buf, "%u\n", temperature);
/* hwmon sysfs attributes: the current temperature plus the
 * firmware-supplied caution and maximum thresholds.  The final macro
 * argument is the APE scratchpad offset delivered to tg3_show_temp()
 * through attr->index.  All three are read-only (S_IRUGO).
 */
10157 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10158 TG3_TEMP_SENSOR_OFFSET);
10159 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10160 TG3_TEMP_CAUTION_OFFSET);
10161 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10162 TG3_TEMP_MAX_OFFSET);

/* Attribute table registered as a single sysfs group by
 * tg3_hwmon_open() and removed again in tg3_hwmon_close().
 */
10164 static struct attribute *tg3_attributes[] = {
10165 &sensor_dev_attr_temp1_input.dev_attr.attr,
10166 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10167 &sensor_dev_attr_temp1_max.dev_attr.attr,

10171 static const struct attribute_group tg3_group = {
10172 .attrs = tg3_attributes,
10175 static void tg3_hwmon_close(struct tg3 *tp)
10177 if (tp->hwmon_dev) {
10178 hwmon_device_unregister(tp->hwmon_dev);
10179 tp->hwmon_dev = NULL;
10180 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
/* Register hwmon support if the APE scratchpad carries sensor data.
 *
 * Scans the OCIR records and accumulates their header/data lengths to
 * decide whether there is anything to expose.  On success tp->hwmon_dev
 * holds the registered device; on hwmon registration failure the sysfs
 * group is removed again and tp->hwmon_dev is left NULL.
 */
10184 static void tg3_hwmon_open(struct tg3 *tp)
10188 struct pci_dev *pdev = tp->pdev;
10189 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

/* tg3_sd_scan_scratchpad() zeroes invalid records, so a zero
 * src_data_length below identifies an empty slot.
 */
10191 tg3_sd_scan_scratchpad(tp, ocirs);
10193 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10194 if (!ocirs[i].src_data_length)
10197 size += ocirs[i].src_hdr_length;
10198 size += ocirs[i].src_data_length;
10204 /* Register hwmon sysfs hooks */
10205 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10207 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10211 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10212 if (IS_ERR(tp->hwmon_dev)) {
/* Roll back the sysfs group so close() has nothing to undo. */
10213 tp->hwmon_dev = NULL;
10214 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10215 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
/* Fold the 32-bit hardware counter at REG into the 64-bit (high/low
 * pair) software statistic PSTAT, propagating a carry into .high when
 * the .low accumulator wraps.  The hardware counters themselves wrap at
 * 32 bits, hence the software extension.
 */
10220 #define TG3_STAT_ADD32(PSTAT, REG) \
10221 do {	u32 __val = tr32(REG); \
10222 (PSTAT)->low += __val; \
10223 if ((PSTAT)->low < __val) \
10224 (PSTAT)->high += 1; \
/* Harvest the MAC hardware statistics counters into tp->hw_stats.
 *
 * Called once per second from tg3_timer() on 5705+ chips.  The
 * counters are 32-bit in hardware; TG3_STAT_ADD32 extends them to
 * 64 bits in software.  Must run regularly so no counter can wrap
 * more than once between harvests.
 */
10227 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10229 struct tg3_hw_stats *sp = tp->hw_stats;

10234 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10235 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10236 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10237 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10238 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10239 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10240 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10241 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10242 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10243 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10244 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10245 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10246 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* 5719 RDMA workaround: the TX-length fixup set during init (see the
 * 5719_RDMA_BUG flag) is only needed until enough packets have been
 * sent; once past the channel count, undo it.
 */
10247 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10248 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10249 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10252 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10253 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10254 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10255 tg3_flag_clear(tp, 5719_RDMA_BUG);

10258 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10259 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10260 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10261 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10262 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10263 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10264 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10265 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10266 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10267 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10268 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10269 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10270 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10271 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

10273 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* Most chips expose a discard counter directly; the listed revisions
 * instead report a mbuf low-water attention bit that is folded into
 * rx_discards one event at a time (write-1-to-clear below).
 */
10274 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10275 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10276 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10277 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10279 u32 val = tr32(HOSTCC_FLOW_ATTN);
10280 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10282 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10283 sp->rx_discards.low += val;
10284 if (sp->rx_discards.low < val)
10285 sp->rx_discards.high += 1;
10287 sp->mbuf_lwm_thresh_hit = sp->rx_discards;

10289 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Workaround for chips that can lose an MSI (called from tg3_timer on
 * 5717/57765-class hardware).
 *
 * For each vector: if work is pending but the rx/tx consumer indices
 * have not moved since the previous tick, bump chk_msi_cnt; a second
 * consecutive stalled tick presumably re-fires the interrupt — the
 * retrigger call itself is outside this excerpt (NOTE(review): confirm
 * against the full source).  Any progress resets the counter and the
 * index snapshots.
 */
10292 static void tg3_chk_missed_msi(struct tg3 *tp)
10296 for (i = 0; i < tp->irq_cnt; i++) {
10297 struct tg3_napi *tnapi = &tp->napi[i];
10299 if (tg3_has_work(tnapi)) {
10300 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10301 tnapi->last_tx_cons == tnapi->tx_cons) {
10302 if (tnapi->chk_msi_cnt < 1) {
10303 tnapi->chk_msi_cnt++;
/* Progress was made (or first stalled tick handled): resync. */
10309 tnapi->chk_msi_cnt = 0;
10310 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10311 tnapi->last_tx_cons = tnapi->tx_cons;
/* Device service timer (legacy timer callback; __opaque is the tg3
 * pointer).  Runs every tp->timer_offset jiffies with tp->lock held:
 * missed-MSI check, non-tagged-IRQ status poking, a once-per-second
 * section (stats fetch, EEE, link polling) and the 2-second ASF
 * heartbeat.  Re-arms itself before returning.
 */
10315 static void tg3_timer(unsigned long __opaque)
10317 struct tg3 *tp = (struct tg3 *) __opaque;

/* Skip the body entirely while an IRQ sync or reset task is in
 * flight, but keep the timer running.
 */
10319 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10320 goto restart_timer;

10322 spin_lock(&tp->lock);

10324 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10325 tg3_flag(tp, 57765_CLASS))
10326 tg3_chk_missed_msi(tp);

10328 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10329 /* BCM4785: Flush posted writes from GbE to host memory. */

10333 if (!tg3_flag(tp, TAGGED_STATUS)) {
10334 /* All of this garbage is because when using non-tagged
10335 * IRQ status the mailbox/status_block protocol the chip
10336 * uses with the cpu is race prone.
10338 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10339 tw32(GRC_LOCAL_CTRL,
10340 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10342 tw32(HOSTCC_MODE, tp->coalesce_mode |
10343 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);

/* A disabled write-DMA engine here means the chip wedged;
 * hand off to the reset task and bail out.
 */
10346 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10347 spin_unlock(&tp->lock);
10348 tg3_reset_task_schedule(tp);
10349 goto restart_timer;

10353 /* This part only runs once per second. */
10354 if (!--tp->timer_counter) {
10355 if (tg3_flag(tp, 5705_PLUS))
10356 tg3_periodic_fetch_stats(tp);

10358 if (tp->setlpicnt && !--tp->setlpicnt)
10359 tg3_phy_eee_enable(tp);

10361 if (tg3_flag(tp, USE_LINKCHG_REG)) {

10365 mac_stat = tr32(MAC_STATUS);

10368 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10369 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10371 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)

10375 tg3_setup_phy(tp, 0);
10376 } else if (tg3_flag(tp, POLL_SERDES)) {
10377 u32 mac_stat = tr32(MAC_STATUS);
10378 int need_setup = 0;

10381 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {

10384 if (!tp->link_up &&
10385 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10386 MAC_STATUS_SIGNAL_DET))) {

10390 if (!tp->serdes_counter) {
10393 ~MAC_MODE_PORT_MODE_MASK));
10395 tw32_f(MAC_MODE, tp->mac_mode);

10398 tg3_setup_phy(tp, 0);

10400 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10401 tg3_flag(tp, 5780_CLASS)) {
10402 tg3_serdes_parallel_detect(tp);

10405 tp->timer_counter = tp->timer_multiplier;

10408 /* Heartbeat is only sent once every 2 seconds.
10410 * The heartbeat is to tell the ASF firmware that the host
10411 * driver is still alive. In the event that the OS crashes,
10412 * ASF needs to reset the hardware to free up the FIFO space
10413 * that may be filled with rx packets destined for the host.
10414 * If the FIFO is full, ASF will no longer function properly.
10416 * Unintended resets have been reported on real time kernels
10417 * where the timer doesn't run on time. Netpoll will also have
10420 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10421 * to check the ring condition when the heartbeat is expiring
10422 * before doing the reset. This will prevent most unintended
10425 if (!--tp->asf_counter) {
10426 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10427 tg3_wait_for_event_ack(tp);

10429 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10430 FWCMD_NICDRV_ALIVE3);
10431 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10432 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10433 TG3_FW_UPDATE_TIMEOUT_SEC);

10435 tg3_generate_fw_event(tp);

10437 tp->asf_counter = tp->asf_multiplier;

10440 spin_unlock(&tp->lock);

/* Re-arm for the next tick. */
10443 tp->timer.expires = jiffies + tp->timer_offset;
10444 add_timer(&tp->timer);
/* tg3_timer_init(): choose the periodic-timer interval and initialize the
 * timer object.  Tagged-status chips (except 5717 / 57765-class, which are
 * the same parts tg3_timer polls for missed MSIs) tick once per second;
 * all others tick at 10 Hz.  The multipliers convert the 1 Hz "once per
 * second" and ASF-heartbeat work in tg3_timer into counts of timer ticks.
 * NOTE(review): this listing is elided — the "else" before the HZ/10
 * assignment and the braces are not shown.
 */
10447 static void tg3_timer_init(struct tg3 *tp)
10449 if (tg3_flag(tp, TAGGED_STATUS) &&
10450 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10451 !tg3_flag(tp, 57765_CLASS))
10452 tp->timer_offset = HZ;
10454 tp->timer_offset = HZ / 10;
10456 BUG_ON(tp->timer_offset > HZ);
/* timer_multiplier = ticks per second; asf_multiplier = ticks between
 * ASF heartbeats (TG3_FW_UPDATE_FREQ_SEC seconds' worth of ticks).
 */
10458 tp->timer_multiplier = (HZ / tp->timer_offset);
10459 tp->asf_multiplier = (HZ / tp->timer_offset) *
10460 TG3_FW_UPDATE_FREQ_SEC;
/* Legacy timer API: callback gets the tg3 pointer via timer.data. */
10462 init_timer(&tp->timer);
10463 tp->timer.data = (unsigned long) tp;
10464 tp->timer.function = tg3_timer;
/* tg3_timer_start(): reload the per-second and ASF-heartbeat countdowns
 * and arm the timer one timer_offset (one tick interval) from now.
 */
10467 static void tg3_timer_start(struct tg3 *tp)
10469 tp->asf_counter = tp->asf_multiplier;
10470 tp->timer_counter = tp->timer_multiplier;
10472 tp->timer.expires = jiffies + tp->timer_offset;
10473 add_timer(&tp->timer);
/* tg3_timer_stop(): cancel the periodic timer; del_timer_sync() also
 * waits for a concurrently-running tg3_timer callback to finish, so it
 * must not be called while holding a lock the callback takes.
 */
10476 static void tg3_timer_stop(struct tg3 *tp)
10478 del_timer_sync(&tp->timer);
10481 /* Restart hardware after configuration changes, self-test, etc.
10482 * Invoked with tp->lock held.
/* Returns 0 on success; on tg3_init_hw() failure the device is halted
 * and dev_close() is invoked.  The sparse annotations below record that
 * the error path drops and re-takes tp->lock around dev_close().
 * NOTE(review): the "if (err)" guard line around the error path is
 * elided in this listing.
 */
10484 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10485 __releases(tp->lock)
10486 __acquires(tp->lock)
10490 err = tg3_init_hw(tp, reset_phy);
10492 netdev_err(tp->dev,
10493 "Failed to re-initialize device, aborting\n");
10494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* dev_close() sleeps and takes rtnl; must release tp->lock and stop the
 * timer first, then re-acquire before returning to the caller.
 */
10495 tg3_full_unlock(tp);
10496 tg3_timer_stop(tp);
10498 tg3_napi_enable(tp);
10499 dev_close(tp->dev);
10500 tg3_full_lock(tp, 0);
/* tg3_reset_task(): workqueue handler that fully resets the chip after a
 * fault (scheduled via tg3_reset_task_schedule()).  Bails out early if
 * the interface has been brought down in the meantime; otherwise halts
 * the hardware, re-initializes it and restarts the net interface.
 * Clears RESET_TASK_PENDING on every exit path.
 */
10505 static void tg3_reset_task(struct work_struct *work)
10507 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10510 tg3_full_lock(tp, 0);
10512 if (!netif_running(tp->dev)) {
10513 tg3_flag_clear(tp, RESET_TASK_PENDING);
10514 tg3_full_unlock(tp);
10518 tg3_full_unlock(tp);
10522 tg3_netif_stop(tp);
10524 tg3_full_lock(tp, 1);
/* A TX-recovery reset implies a posted-write ordering problem was seen:
 * fall back to flushed mailbox writes from here on.
 */
10526 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10527 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10528 tp->write32_rx_mbox = tg3_write_flush_reg32;
10529 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10530 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10533 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10534 err = tg3_init_hw(tp, 1);
/* NOTE(review): the error-handling branch after tg3_init_hw() is elided
 * in this listing.
 */
10538 tg3_netif_start(tp);
10541 tg3_full_unlock(tp);
10546 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* tg3_request_irq(): request the IRQ for NAPI context @irq_num.
 * With a single vector the device name is used directly; with multiple
 * vectors a per-vector "<dev>-<n>" label is built in tnapi->irq_lbl.
 * The handler is chosen by interrupt mode: one-shot MSI, plain, or
 * tagged-status; legacy INTx uses IRQF_SHARED.
 * Returns the request_irq() result (0 on success).
 */
10549 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10552 unsigned long flags;
10554 struct tg3_napi *tnapi = &tp->napi[irq_num];
10556 if (tp->irq_cnt == 1)
10557 name = tp->dev->name;
10559 name = &tnapi->irq_lbl[0];
10560 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10561 name[IFNAMSIZ-1] = 0;
10564 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10566 if (tg3_flag(tp, 1SHOT_MSI))
10567 fn = tg3_msi_1shot;
10570 fn = tg3_interrupt;
10571 if (tg3_flag(tp, TAGGED_STATUS))
10572 fn = tg3_interrupt_tagged;
10573 flags = IRQF_SHARED;
10576 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* tg3_test_interrupt(): verify that the chip can actually deliver an
 * interrupt to the host.  Temporarily replaces vector 0's handler with
 * tg3_test_isr, forces a coalescing-now event, then polls up to 5 times
 * for either a non-zero interrupt mailbox or the MASK_PCI_INT bit.
 * The original handler is restored before returning.
 * Return semantics per the surrounding driver: the intr_ok result and
 * err are combined on the (elided) exit path.
 */
10579 static int tg3_test_interrupt(struct tg3 *tp)
10581 struct tg3_napi *tnapi = &tp->napi[0];
10582 struct net_device *dev = tp->dev;
10583 int err, i, intr_ok = 0;
10586 if (!netif_running(dev))
10589 tg3_disable_ints(tp);
10591 free_irq(tnapi->irq_vec, tnapi);
10594 * Turn off MSI one shot mode. Otherwise this test has no
10595 * observable way to know whether the interrupt was delivered.
10597 if (tg3_flag(tp, 57765_PLUS)) {
10598 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10599 tw32(MSGINT_MODE, val);
10602 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10603 IRQF_SHARED, dev->name, tnapi);
10607 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10608 tg3_enable_ints(tp);
/* Force an immediate coalescing event to trigger the test interrupt. */
10610 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10613 for (i = 0; i < 5; i++) {
10614 u32 int_mbox, misc_host_ctrl;
10616 int_mbox = tr32_mailbox(tnapi->int_mbox);
10617 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10619 if ((int_mbox != 0) ||
10620 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
/* NOTE(review): the intr_ok = 1 / break / delay lines of this
 * poll loop are elided in this listing.
 */
10625 if (tg3_flag(tp, 57765_PLUS) &&
10626 tnapi->hw_status->status_tag != tnapi->last_tag)
10627 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10632 tg3_disable_ints(tp);
10634 free_irq(tnapi->irq_vec, tnapi);
10636 err = tg3_request_irq(tp, 0);
10642 /* Reenable MSI one shot mode. */
10643 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10644 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10645 tw32(MSGINT_MODE, val);
10653 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10654 * successfully restored
/* Runs tg3_test_interrupt() with SERR reporting masked (an MSI that
 * terminates with Master Abort would otherwise raise SERR).  If the MSI
 * test fails, falls back to legacy INTx: frees the vector, disables MSI,
 * re-requests the legacy IRQ and resets/re-inits the chip.
 */
10656 static int tg3_test_msi(struct tg3 *tp)
10661 if (!tg3_flag(tp, USING_MSI))
10664 /* Turn off SERR reporting in case MSI terminates with Master
10667 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10668 pci_write_config_word(tp->pdev, PCI_COMMAND,
10669 pci_cmd & ~PCI_COMMAND_SERR);
10671 err = tg3_test_interrupt(tp);
/* Restore the saved PCI command word regardless of the test result. */
10673 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10678 /* other failures */
10682 /* MSI test failed, go back to INTx mode */
10683 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10684 "to INTx mode. Please report this failure to the PCI "
10685 "maintainer and include system chipset information\n");
10687 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10689 pci_disable_msi(tp->pdev);
10691 tg3_flag_clear(tp, USING_MSI);
10692 tp->napi[0].irq_vec = tp->pdev->irq;
10694 err = tg3_request_irq(tp, 0);
10698 /* Need to reset the chip because the MSI cycle may have terminated
10699 * with Master Abort.
10701 tg3_full_lock(tp, 1);
10703 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10704 err = tg3_init_hw(tp, 1);
10706 tg3_full_unlock(tp);
/* NOTE(review): per the elided error path, the IRQ is freed again if
 * re-initialization after the INTx fallback fails.
 */
10709 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* tg3_request_firmware(): load the firmware image named by tp->fw_needed
 * into tp->fw and sanity-check its header.  The header's length field is
 * the full runtime size including BSS, so it must be at least the blob
 * size minus the header; otherwise the firmware is released and an error
 * (elided here) is returned.  On success tp->fw_needed is cleared so the
 * load is not repeated.
 */
10714 static int tg3_request_firmware(struct tg3 *tp)
10716 const struct tg3_firmware_hdr *fw_hdr;
10718 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10719 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10724 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10726 /* Firmware blob starts with version numbers, followed by
10727 * start address and _full_ length including BSS sections
10728 * (which must be longer than the actual data, of course
10731 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10732 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10733 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10734 tp->fw_len, tp->fw_needed);
10735 release_firmware(tp->fw);
10740 /* We no longer need firmware; we have it. */
10741 tp->fw_needed = NULL;
/* tg3_irq_count(): number of interrupt vectors to request.  Starts from
 * the larger of the RX/TX queue counts; in multiqueue mode one extra
 * vector is added for the link/misc vector, capped at tp->irq_max.
 * NOTE(review): the condition guarding the +1 adjustment is elided in
 * this listing.
 */
10745 static u32 tg3_irq_count(struct tg3 *tp)
10747 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10750 /* We want as many rx rings enabled as there are cpus.
10751 * In multiqueue MSI-X mode, the first MSI-X vector
10752 * only deals with link interrupts, etc, so we add
10753 * one to the number of vectors we are requesting.
10755 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
/* tg3_enable_msix(): try to switch the device to MSI-X.  Sizes the RX
 * queue count from the default RSS queue heuristic (unless overridden by
 * rxq_req/txq_req), requests the vectors, and falls back to a reduced
 * vector count if the PCI core grants fewer than asked.  Sets ENABLE_RSS
 * / ENABLE_TSS when multiple RX / TX queues are active.  Returns true on
 * success, false if MSI-X could not be enabled (elided return paths).
 */
10761 static bool tg3_enable_msix(struct tg3 *tp)
10764 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10766 tp->txq_cnt = tp->txq_req;
10767 tp->rxq_cnt = tp->rxq_req;
10769 tp->rxq_cnt = netif_get_num_default_rss_queues();
10770 if (tp->rxq_cnt > tp->rxq_max)
10771 tp->rxq_cnt = tp->rxq_max;
10773 /* Disable multiple TX rings by default. Simple round-robin hardware
10774 * scheduling of the TX rings can cause starvation of rings with
10775 * small packets when other rings have TSO or jumbo packets.
10780 tp->irq_cnt = tg3_irq_count(tp);
10782 for (i = 0; i < tp->irq_max; i++) {
10783 msix_ent[i].entry = i;
10784 msix_ent[i].vector = 0;
10787 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10790 } else if (rc != 0) {
/* Positive rc = number of vectors actually available; retry with that. */
10791 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10793 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
/* One vector is reserved for link interrupts, so rc - 1 RX queues. */
10796 tp->rxq_cnt = max(rc - 1, 1);
10798 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10801 for (i = 0; i < tp->irq_max; i++)
10802 tp->napi[i].irq_vec = msix_ent[i].vector;
10804 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10805 pci_disable_msix(tp->pdev);
10809 if (tp->irq_cnt == 1)
10812 tg3_flag_set(tp, ENABLE_RSS);
10814 if (tp->txq_cnt > 1)
10815 tg3_flag_set(tp, ENABLE_TSS);
10817 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
/* tg3_ints_init(): pick the interrupt delivery mode for the device, in
 * preference order MSI-X -> MSI -> legacy INTx, and program MSGINT_MODE
 * accordingly.  MSI is refused (with a warning) on chips that claim MSI
 * support without tagged status, since all MSI-capable chips should have
 * it.  With a single vector, real queue counts collapse to 1/1.
 */
10822 static void tg3_ints_init(struct tg3 *tp)
10824 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10825 !tg3_flag(tp, TAGGED_STATUS)) {
10826 /* All MSI supporting chips should support tagged
10827 * status. Assert that this is the case.
10829 netdev_warn(tp->dev,
10830 "MSI without TAGGED_STATUS? Not using MSI\n");
10834 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10835 tg3_flag_set(tp, USING_MSIX);
10836 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10837 tg3_flag_set(tp, USING_MSI);
10839 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10840 u32 msi_mode = tr32(MSGINT_MODE);
10841 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10842 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10843 if (!tg3_flag(tp, 1SHOT_MSI))
10844 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10845 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10848 if (!tg3_flag(tp, USING_MSIX)) {
10850 tp->napi[0].irq_vec = tp->pdev->irq;
10853 if (tp->irq_cnt == 1) {
10856 netif_set_real_num_tx_queues(tp->dev, 1);
10857 netif_set_real_num_rx_queues(tp->dev, 1);
/* tg3_ints_fini(): undo tg3_ints_init() — disable whichever of MSI-X or
 * MSI was active and clear all interrupt-mode and RSS/TSS flags.
 */
10861 static void tg3_ints_fini(struct tg3 *tp)
10863 if (tg3_flag(tp, USING_MSIX))
10864 pci_disable_msix(tp->pdev);
10865 else if (tg3_flag(tp, USING_MSI))
10866 pci_disable_msi(tp->pdev);
10867 tg3_flag_clear(tp, USING_MSI);
10868 tg3_flag_clear(tp, USING_MSIX);
10869 tg3_flag_clear(tp, ENABLE_RSS);
10870 tg3_flag_clear(tp, ENABLE_TSS);
/* tg3_start(): bring the device up — set up interrupts, allocate DMA
 * rings, request all IRQs, initialize the hardware, optionally run the
 * MSI self-test, start the timer and enable interrupts, then wake the TX
 * queues.  On failure, unwinds in reverse order via the labels at the
 * bottom (their goto targets are elided in this listing).
 * NOTE(review): signature continues past line 10873; the remaining
 * parameter line is elided.
 */
10873 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10876 struct net_device *dev = tp->dev;
10880 * Setup interrupts first so we know how
10881 * many NAPI resources to allocate
10885 tg3_rss_check_indir_tbl(tp);
10887 /* The placement of this call is tied
10888 * to the setup and use of Host TX descriptors.
10890 err = tg3_alloc_consistent(tp);
10896 tg3_napi_enable(tp);
10898 for (i = 0; i < tp->irq_cnt; i++) {
10899 struct tg3_napi *tnapi = &tp->napi[i];
10900 err = tg3_request_irq(tp, i);
/* On failure, free the IRQs already requested before bailing out. */
10902 for (i--; i >= 0; i--) {
10903 tnapi = &tp->napi[i];
10904 free_irq(tnapi->irq_vec, tnapi);
10910 tg3_full_lock(tp, 0);
10912 err = tg3_init_hw(tp, reset_phy);
10914 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10915 tg3_free_rings(tp);
10918 tg3_full_unlock(tp);
10923 if (test_irq && tg3_flag(tp, USING_MSI)) {
10924 err = tg3_test_msi(tp);
10927 tg3_full_lock(tp, 0);
10928 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10929 tg3_free_rings(tp);
10930 tg3_full_unlock(tp);
/* Pre-57765 chips enable one-shot MSI via a PCIE config register. */
10935 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10936 u32 val = tr32(PCIE_TRANSACTION_CFG);
10938 tw32(PCIE_TRANSACTION_CFG,
10939 val | PCIE_TRANS_CFG_1SHOT_MSI);
10945 tg3_hwmon_open(tp);
10947 tg3_full_lock(tp, 0);
10949 tg3_timer_start(tp);
10950 tg3_flag_set(tp, INIT_COMPLETE);
10951 tg3_enable_ints(tp);
10956 tg3_ptp_resume(tp);
10959 tg3_full_unlock(tp);
10961 netif_tx_start_all_queues(dev);
10964 * Reset loopback feature if it was turned on while the device was down
10965 * make sure that it's installed properly now.
10967 if (dev->features & NETIF_F_LOOPBACK)
10968 tg3_set_loopback(dev, dev->features)
/* Error-unwind path: free IRQs, disable NAPI, release DMA memory. */;
10973 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10974 struct tg3_napi *tnapi = &tp->napi[i];
10975 free_irq(tnapi->irq_vec, tnapi);
10979 tg3_napi_disable(tp);
10981 tg3_free_consistent(tp);
/* tg3_stop(): tear down a running device — cancel any pending reset
 * work, stop the net interface and timer, close hwmon, halt the chip,
 * free the rings, release every IRQ, and finally free DMA memory.
 * Inverse of tg3_start().
 */
10989 static void tg3_stop(struct tg3 *tp)
10993 tg3_reset_task_cancel(tp);
10994 tg3_netif_stop(tp);
10996 tg3_timer_stop(tp);
10998 tg3_hwmon_close(tp);
11002 tg3_full_lock(tp, 1);
11004 tg3_disable_ints(tp);
11006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11007 tg3_free_rings(tp);
11008 tg3_flag_clear(tp, INIT_COMPLETE);
11010 tg3_full_unlock(tp);
11012 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11013 struct tg3_napi *tnapi = &tp->napi[i];
11014 free_irq(tnapi->irq_vec, tnapi);
11021 tg3_free_consistent(tp);
/* tg3_open(): .ndo_open handler.  Loads firmware if still needed,
 * adjusting capabilities on specific chips when the load fails or
 * succeeds (EEE on 57766; TSO on 5701 A0), powers the device up,
 * clears stale interrupt state, runs tg3_start() with PHY reset and the
 * MSI self-test, and registers the PTP clock on capable hardware.
 * NOTE(review): several error/else branch lines are elided; e.g. the
 * firmware-failure checks guarding the capability changes.
 */
11024 static int tg3_open(struct net_device *dev)
11026 struct tg3 *tp = netdev_priv(dev);
11029 if (tp->fw_needed) {
11030 err = tg3_request_firmware(tp);
11031 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11033 netdev_warn(tp->dev, "EEE capability disabled\n");
11034 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11035 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11036 netdev_warn(tp->dev, "EEE capability restored\n");
11037 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11039 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11043 netdev_warn(tp->dev, "TSO capability disabled\n");
11044 tg3_flag_clear(tp, TSO_CAPABLE);
11045 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11046 netdev_notice(tp->dev, "TSO capability restored\n");
11047 tg3_flag_set(tp, TSO_CAPABLE);
11051 tg3_carrier_off(tp);
11053 err = tg3_power_up(tp);
11057 tg3_full_lock(tp, 0);
11059 tg3_disable_ints(tp);
11060 tg3_flag_clear(tp, INIT_COMPLETE);
11062 tg3_full_unlock(tp);
11064 err = tg3_start(tp, true, true, true);
/* On start failure, drop aux power and put the device in D3hot. */
11066 tg3_frob_aux_power(tp, false);
11067 pci_set_power_state(tp->pdev, PCI_D3hot);
11070 if (tg3_flag(tp, PTP_CAPABLE)) {
11071 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11073 if (IS_ERR(tp->ptp_clock))
11074 tp->ptp_clock = NULL;
/* tg3_close(): .ndo_stop handler.  (The tg3_stop() call itself is on an
 * elided line.)  Clears the saved statistics snapshots so counters start
 * fresh across close/open, powers the device down and marks carrier off.
 */
11080 static int tg3_close(struct net_device *dev)
11082 struct tg3 *tp = netdev_priv(dev);
11088 /* Clear stats across close / open calls */
11089 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11090 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11092 tg3_power_down(tp);
11094 tg3_carrier_off(tp);
/* get_stat64(): combine the high/low 32-bit halves of a hardware
 * statistics counter into a single u64.
 */
11099 static inline u64 get_stat64(tg3_stat64_t *val)
11101 return ((u64)val->high << 32) | ((u64)val->low);
/* tg3_calc_crc_errors(): return the RX CRC error count.  On copper
 * 5700/5701 the count comes from the PHY's RXR counter (read via MII,
 * with the CRC-counter test bit enabled first) accumulated into
 * tp->phy_crc_errors; all other chips use the MAC's rx_fcs_errors
 * hardware statistic.  NOTE(review): locking around the PHY reads is on
 * elided lines.
 */
11104 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11106 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11108 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11109 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11110 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11113 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11114 tg3_writephy(tp, MII_TG3_TEST1,
11115 val | MII_TG3_TEST1_CRC_EN);
11116 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11120 tp->phy_crc_errors += val;
11122 return tp->phy_crc_errors;
11125 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD(): add the live hardware counter for @member to the snapshot
 * saved at the last close (estats_prev), storing the sum in estats.
 */
11128 #define ESTAT_ADD(member) \
11129 estats->member = old_estats->member + \
11130 get_stat64(&hw_stats->member)
/* tg3_get_estats(): fill the ethtool extended-statistics structure by
 * summing every hardware counter with its pre-close snapshot.  Purely
 * mechanical — one ESTAT_ADD per exported counter.
 */
11132 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11134 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11135 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11137 ESTAT_ADD(rx_octets);
11138 ESTAT_ADD(rx_fragments);
11139 ESTAT_ADD(rx_ucast_packets);
11140 ESTAT_ADD(rx_mcast_packets);
11141 ESTAT_ADD(rx_bcast_packets);
11142 ESTAT_ADD(rx_fcs_errors);
11143 ESTAT_ADD(rx_align_errors);
11144 ESTAT_ADD(rx_xon_pause_rcvd);
11145 ESTAT_ADD(rx_xoff_pause_rcvd);
11146 ESTAT_ADD(rx_mac_ctrl_rcvd);
11147 ESTAT_ADD(rx_xoff_entered);
11148 ESTAT_ADD(rx_frame_too_long_errors);
11149 ESTAT_ADD(rx_jabbers);
11150 ESTAT_ADD(rx_undersize_packets);
11151 ESTAT_ADD(rx_in_length_errors);
11152 ESTAT_ADD(rx_out_length_errors);
11153 ESTAT_ADD(rx_64_or_less_octet_packets);
11154 ESTAT_ADD(rx_65_to_127_octet_packets);
11155 ESTAT_ADD(rx_128_to_255_octet_packets);
11156 ESTAT_ADD(rx_256_to_511_octet_packets);
11157 ESTAT_ADD(rx_512_to_1023_octet_packets);
11158 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11159 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11160 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11161 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11162 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11164 ESTAT_ADD(tx_octets);
11165 ESTAT_ADD(tx_collisions);
11166 ESTAT_ADD(tx_xon_sent);
11167 ESTAT_ADD(tx_xoff_sent);
11168 ESTAT_ADD(tx_flow_control);
11169 ESTAT_ADD(tx_mac_errors);
11170 ESTAT_ADD(tx_single_collisions);
11171 ESTAT_ADD(tx_mult_collisions);
11172 ESTAT_ADD(tx_deferred);
11173 ESTAT_ADD(tx_excessive_collisions);
11174 ESTAT_ADD(tx_late_collisions);
11175 ESTAT_ADD(tx_collide_2times);
11176 ESTAT_ADD(tx_collide_3times);
11177 ESTAT_ADD(tx_collide_4times);
11178 ESTAT_ADD(tx_collide_5times);
11179 ESTAT_ADD(tx_collide_6times);
11180 ESTAT_ADD(tx_collide_7times);
11181 ESTAT_ADD(tx_collide_8times);
11182 ESTAT_ADD(tx_collide_9times);
11183 ESTAT_ADD(tx_collide_10times);
11184 ESTAT_ADD(tx_collide_11times);
11185 ESTAT_ADD(tx_collide_12times);
11186 ESTAT_ADD(tx_collide_13times);
11187 ESTAT_ADD(tx_collide_14times);
11188 ESTAT_ADD(tx_collide_15times);
11189 ESTAT_ADD(tx_ucast_packets);
11190 ESTAT_ADD(tx_mcast_packets);
11191 ESTAT_ADD(tx_bcast_packets);
11192 ESTAT_ADD(tx_carrier_sense_errors);
11193 ESTAT_ADD(tx_discards);
11194 ESTAT_ADD(tx_errors);
11196 ESTAT_ADD(dma_writeq_full);
11197 ESTAT_ADD(dma_write_prioq_full);
11198 ESTAT_ADD(rxbds_empty);
11199 ESTAT_ADD(rx_discards);
11200 ESTAT_ADD(rx_errors);
11201 ESTAT_ADD(rx_threshold_hit);
11203 ESTAT_ADD(dma_readq_full);
11204 ESTAT_ADD(dma_read_prioq_full);
11205 ESTAT_ADD(tx_comp_queue_full);
11207 ESTAT_ADD(ring_set_send_prod_index);
11208 ESTAT_ADD(ring_status_update);
11209 ESTAT_ADD(nic_irqs);
11210 ESTAT_ADD(nic_avoided_irqs);
11211 ESTAT_ADD(nic_tx_threshold_hit);
11213 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* tg3_get_nstats(): fill the standard rtnl_link_stats64 structure from
 * the hardware counters plus the snapshot saved at the last close.
 * Aggregated fields (rx/tx packets, errors, length errors) sum several
 * hardware counters; rx/tx_dropped come from software counters in tp.
 */
11216 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11218 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11219 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11221 stats->rx_packets = old_stats->rx_packets +
11222 get_stat64(&hw_stats->rx_ucast_packets) +
11223 get_stat64(&hw_stats->rx_mcast_packets) +
11224 get_stat64(&hw_stats->rx_bcast_packets);
11226 stats->tx_packets = old_stats->tx_packets +
11227 get_stat64(&hw_stats->tx_ucast_packets) +
11228 get_stat64(&hw_stats->tx_mcast_packets) +
11229 get_stat64(&hw_stats->tx_bcast_packets);
11231 stats->rx_bytes = old_stats->rx_bytes +
11232 get_stat64(&hw_stats->rx_octets);
11233 stats->tx_bytes = old_stats->tx_bytes +
11234 get_stat64(&hw_stats->tx_octets);
11236 stats->rx_errors = old_stats->rx_errors +
11237 get_stat64(&hw_stats->rx_errors);
11238 stats->tx_errors = old_stats->tx_errors +
11239 get_stat64(&hw_stats->tx_errors) +
11240 get_stat64(&hw_stats->tx_mac_errors) +
11241 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11242 get_stat64(&hw_stats->tx_discards);
11244 stats->multicast = old_stats->multicast +
11245 get_stat64(&hw_stats->rx_mcast_packets);
11246 stats->collisions = old_stats->collisions +
11247 get_stat64(&hw_stats->tx_collisions);
11249 stats->rx_length_errors = old_stats->rx_length_errors +
11250 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11251 get_stat64(&hw_stats->rx_undersize_packets);
11253 stats->rx_over_errors = old_stats->rx_over_errors +
11254 get_stat64(&hw_stats->rxbds_empty);
11255 stats->rx_frame_errors = old_stats->rx_frame_errors +
11256 get_stat64(&hw_stats->rx_align_errors);
11257 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11258 get_stat64(&hw_stats->tx_discards);
11259 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11260 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701 — delegate. */
11262 stats->rx_crc_errors = old_stats->rx_crc_errors +
11263 tg3_calc_crc_errors(tp);
11265 stats->rx_missed_errors = old_stats->rx_missed_errors +
11266 get_stat64(&hw_stats->rx_discards);
11268 stats->rx_dropped = tp->rx_dropped;
11269 stats->tx_dropped = tp->tx_dropped;
/* tg3_get_regs_len(): ethtool .get_regs_len — fixed register blob size. */
11272 static int tg3_get_regs_len(struct net_device *dev)
11274 return TG3_REG_BLK_SIZE;
/* tg3_get_regs(): ethtool .get_regs — dump the register block into the
 * caller's buffer under the full lock.  The buffer is zeroed first; the
 * dump is skipped when the PHY is in low-power mode (early-return line
 * elided).
 */
11277 static void tg3_get_regs(struct net_device *dev,
11278 struct ethtool_regs *regs, void *_p)
11280 struct tg3 *tp = netdev_priv(dev);
11284 memset(_p, 0, TG3_REG_BLK_SIZE);
11286 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11289 tg3_full_lock(tp, 0);
11291 tg3_dump_legacy_regs(tp, (u32 *)_p);
11293 tg3_full_unlock(tp);
/* tg3_get_eeprom_len(): ethtool .get_eeprom_len — size of the NVRAM. */
11296 static int tg3_get_eeprom_len(struct net_device *dev)
11298 struct tg3 *tp = netdev_priv(dev);
11300 return tp->nvram_size;
/* tg3_get_eeprom(): ethtool .get_eeprom.  NVRAM reads are 4-byte-aligned
 * big-endian words, so the request is split into: a partial leading word
 * (when offset is unaligned), whole middle words, and a partial trailing
 * word.  Fails when there is no NVRAM or the PHY is in low-power mode
 * (return lines elided).
 */
11303 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11305 struct tg3 *tp = netdev_priv(dev);
11308 u32 i, offset, len, b_offset, b_count;
11311 if (tg3_flag(tp, NO_NVRAM))
11314 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11317 offset = eeprom->offset;
11321 eeprom->magic = TG3_EEPROM_MAGIC;
11324 /* adjustments to start on required 4 byte boundary */
11325 b_offset = offset & 3;
11326 b_count = 4 - b_offset;
11327 if (b_count > len) {
11328 /* i.e. offset=1 len=2 */
11331 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11334 memcpy(data, ((char *)&val) + b_offset, b_count);
11337 eeprom->len += b_count;
11340 /* read bytes up to the last 4 byte boundary */
11341 pd = &data[eeprom->len];
11342 for (i = 0; i < (len - (len & 3)); i += 4) {
11343 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11348 memcpy(pd + i, &val, 4);
11353 /* read last bytes not ending on 4 byte boundary */
11354 pd = &data[eeprom->len];
11356 b_offset = offset + len - b_count;
11357 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11360 memcpy(pd, &val, b_count);
11361 eeprom->len += b_count;
/* tg3_set_eeprom(): ethtool .set_eeprom.  NVRAM writes must cover whole
 * 4-byte words, so when the request is unaligned at either end the
 * bordering words are read back first (read-modify-write) into a bounce
 * buffer that pads the user data out to word boundaries.  Requires the
 * caller-supplied magic to match and the PHY not to be in low-power mode.
 */
11366 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11368 struct tg3 *tp = netdev_priv(dev);
11370 u32 offset, len, b_offset, odd_len;
11374 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11377 if (tg3_flag(tp, NO_NVRAM) ||
11378 eeprom->magic != TG3_EEPROM_MAGIC)
11381 offset = eeprom->offset;
11384 if ((b_offset = (offset & 3))) {
11385 /* adjustments to start on required 4 byte boundary */
11386 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11397 /* adjustments to end on required 4 byte boundary */
11399 len = (len + 3) & ~3;
11400 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11406 if (b_offset || odd_len) {
11407 buf = kmalloc(len, GFP_KERNEL);
/* Splice the preserved boundary words around the user data. */
11411 memcpy(buf, &start, 4);
11413 memcpy(buf+len-4, &end, 4);
11414 memcpy(buf + b_offset, data, eeprom->len);
11417 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* tg3_get_settings(): ethtool .get_settings.  When phylib drives the PHY
 * the query is delegated to phy_ethtool_gset(); otherwise the supported/
 * advertised masks are built from the driver's own phy_flags and link
 * config.  Live speed/duplex/MDI-X are reported only when the interface
 * is running with link up; otherwise UNKNOWN values are returned.
 */
11425 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11427 struct tg3 *tp = netdev_priv(dev);
11429 if (tg3_flag(tp, USE_PHYLIB)) {
11430 struct phy_device *phydev;
11431 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11433 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11434 return phy_ethtool_gset(phydev, cmd);
11437 cmd->supported = (SUPPORTED_Autoneg);
11439 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11440 cmd->supported |= (SUPPORTED_1000baseT_Half |
11441 SUPPORTED_1000baseT_Full);
11443 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11444 cmd->supported |= (SUPPORTED_100baseT_Half |
11445 SUPPORTED_100baseT_Full |
11446 SUPPORTED_10baseT_Half |
11447 SUPPORTED_10baseT_Full |
11449 cmd->port = PORT_TP;
11451 cmd->supported |= SUPPORTED_FIBRE;
11452 cmd->port = PORT_FIBRE;
11455 cmd->advertising = tp->link_config.advertising;
11456 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11457 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11458 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11459 cmd->advertising |= ADVERTISED_Pause;
11461 cmd->advertising |= ADVERTISED_Pause |
11462 ADVERTISED_Asym_Pause;
11464 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11465 cmd->advertising |= ADVERTISED_Asym_Pause;
11468 if (netif_running(dev) && tp->link_up) {
11469 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11470 cmd->duplex = tp->link_config.active_duplex;
11471 cmd->lp_advertising = tp->link_config.rmt_adv;
11472 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11473 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11474 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11476 cmd->eth_tp_mdix = ETH_TP_MDI;
11479 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11480 cmd->duplex = DUPLEX_UNKNOWN;
11481 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11483 cmd->phy_address = tp->phy_addr;
11484 cmd->transceiver = XCVR_INTERNAL;
11485 cmd->autoneg = tp->link_config.autoneg;
/* tg3_set_settings(): ethtool .set_settings.  Delegates to
 * phy_ethtool_sset() under phylib; otherwise validates the requested
 * autoneg/speed/duplex against what the PHY supports (serdes parts only
 * accept 1000/full forced mode; forced-speed validation lines partly
 * elided), masks the advertising bits, updates link_config under the
 * full lock, and re-runs PHY setup when the interface is up.
 */
11491 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11493 struct tg3 *tp = netdev_priv(dev);
11494 u32 speed = ethtool_cmd_speed(cmd);
11496 if (tg3_flag(tp, USE_PHYLIB)) {
11497 struct phy_device *phydev;
11498 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11500 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11501 return phy_ethtool_sset(phydev, cmd);
11504 if (cmd->autoneg != AUTONEG_ENABLE &&
11505 cmd->autoneg != AUTONEG_DISABLE)
11508 if (cmd->autoneg == AUTONEG_DISABLE &&
11509 cmd->duplex != DUPLEX_FULL &&
11510 cmd->duplex != DUPLEX_HALF)
11513 if (cmd->autoneg == AUTONEG_ENABLE) {
11514 u32 mask = ADVERTISED_Autoneg |
11516 ADVERTISED_Asym_Pause;
11518 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11519 mask |= ADVERTISED_1000baseT_Half |
11520 ADVERTISED_1000baseT_Full;
11522 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11523 mask |= ADVERTISED_100baseT_Half |
11524 ADVERTISED_100baseT_Full |
11525 ADVERTISED_10baseT_Half |
11526 ADVERTISED_10baseT_Full |
11529 mask |= ADVERTISED_FIBRE;
11531 if (cmd->advertising & ~mask)
11534 mask &= (ADVERTISED_1000baseT_Half |
11535 ADVERTISED_1000baseT_Full |
11536 ADVERTISED_100baseT_Half |
11537 ADVERTISED_100baseT_Full |
11538 ADVERTISED_10baseT_Half |
11539 ADVERTISED_10baseT_Full);
11541 cmd->advertising &= mask;
11543 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11544 if (speed != SPEED_1000)
11547 if (cmd->duplex != DUPLEX_FULL)
11550 if (speed != SPEED_100 &&
11556 tg3_full_lock(tp, 0);
11558 tp->link_config.autoneg = cmd->autoneg;
11559 if (cmd->autoneg == AUTONEG_ENABLE) {
11560 tp->link_config.advertising = (cmd->advertising |
11561 ADVERTISED_Autoneg);
/* Autoneg mode: forced speed/duplex are meaningless — mark unknown. */
11562 tp->link_config.speed = SPEED_UNKNOWN;
11563 tp->link_config.duplex = DUPLEX_UNKNOWN;
11565 tp->link_config.advertising = 0;
11566 tp->link_config.speed = speed;
11567 tp->link_config.duplex = cmd->duplex;
11570 if (netif_running(dev))
11571 tg3_setup_phy(tp, 1);
11573 tg3_full_unlock(tp);
/* tg3_get_drvinfo(): ethtool .get_drvinfo — report driver name/version,
 * firmware version string and PCI bus address.
 */
11578 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11580 struct tg3 *tp = netdev_priv(dev);
11582 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11583 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11584 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11585 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
/* tg3_get_wol(): ethtool .get_wol — magic-packet wake is supported only
 * when the chip has WoL capability and the platform can wake the device;
 * report it enabled only when the WOL_ENABLE flag is also set.
 */
11588 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11590 struct tg3 *tp = netdev_priv(dev);
11592 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11593 wol->supported = WAKE_MAGIC;
11595 wol->supported = 0;
11597 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11598 wol->wolopts = WAKE_MAGIC;
11599 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* tg3_set_wol(): ethtool .set_wol — only WAKE_MAGIC is accepted, and
 * only when the chip/platform support it.  Propagates the choice to the
 * PM core via device_set_wakeup_enable() and mirrors the effective state
 * into the WOL_ENABLE flag under tp->lock.
 */
11602 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11604 struct tg3 *tp = netdev_priv(dev);
11605 struct device *dp = &tp->pdev->dev;
11607 if (wol->wolopts & ~WAKE_MAGIC)
11609 if ((wol->wolopts & WAKE_MAGIC) &&
11610 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11613 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11615 spin_lock_bh(&tp->lock);
11616 if (device_may_wakeup(dp))
11617 tg3_flag_set(tp, WOL_ENABLE);
11619 tg3_flag_clear(tp, WOL_ENABLE);
11620 spin_unlock_bh(&tp->lock);
/* tg3_get_msglevel(): ethtool .get_msglevel — current debug mask. */
11625 static u32 tg3_get_msglevel(struct net_device *dev)
11627 struct tg3 *tp = netdev_priv(dev);
11628 return tp->msg_enable;
/* tg3_set_msglevel(): ethtool .set_msglevel — store the debug mask. */
11631 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11633 struct tg3 *tp = netdev_priv(dev);
11634 tp->msg_enable = value;
/* tg3_nway_reset(): ethtool .nway_reset — restart autonegotiation.
 * Not supported on serdes PHYs; under phylib delegates to
 * phy_start_aneg().  Otherwise reads BMCR (first read discards a
 * potentially latched value) and sets ANRESTART only when autoneg is
 * enabled or parallel-detect is active.
 */
11637 static int tg3_nway_reset(struct net_device *dev)
11639 struct tg3 *tp = netdev_priv(dev);
11642 if (!netif_running(dev))
11645 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11648 if (tg3_flag(tp, USE_PHYLIB)) {
11649 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11651 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11655 spin_lock_bh(&tp->lock);
11657 tg3_readphy(tp, MII_BMCR, &bmcr);
11658 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11659 ((bmcr & BMCR_ANENABLE) ||
11660 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11661 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11665 spin_unlock_bh(&tp->lock);
/* tg3_get_ringparam(): ethtool .get_ringparam — report max and current
 * RX standard / RX jumbo / TX ring sizes.  Jumbo values are zero when
 * the jumbo ring is not enabled.
 */
11671 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11673 struct tg3 *tp = netdev_priv(dev);
11675 ering->rx_max_pending = tp->rx_std_ring_mask;
11676 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11677 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11679 ering->rx_jumbo_max_pending = 0;
11681 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11683 ering->rx_pending = tp->rx_pending;
11684 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11685 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11687 ering->rx_jumbo_pending = 0;
11689 ering->tx_pending = tp->napi[0].tx_pending;
/* tg3_set_ringparam(): ethtool .set_ringparam.  Validates the requested
 * sizes against the hardware masks (TX must exceed MAX_SKB_FRAGS, 3x on
 * TSO_BUG chips, so a maximally-fragmented skb always fits), stops the
 * interface if running, applies the new sizes to every NAPI context,
 * then halts/restarts the hardware and restarts the interface.
 */
11692 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11694 struct tg3 *tp = netdev_priv(dev);
11695 int i, irq_sync = 0, err = 0;
11697 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11698 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11699 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11700 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11701 (tg3_flag(tp, TSO_BUG) &&
11702 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11705 if (netif_running(dev)) {
11707 tg3_netif_stop(tp);
11711 tg3_full_lock(tp, irq_sync);
11713 tp->rx_pending = ering->rx_pending;
/* Some chips can only post 64 RX descriptors at a time. */
11715 if (tg3_flag(tp, MAX_RXPEND_64) &&
11716 tp->rx_pending > 63)
11717 tp->rx_pending = 63;
11718 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11720 for (i = 0; i < tp->irq_max; i++)
11721 tp->napi[i].tx_pending = ering->tx_pending;
11723 if (netif_running(dev)) {
11724 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11725 err = tg3_restart_hw(tp, 1);
11727 tg3_netif_start(tp);
11730 tg3_full_unlock(tp);
11732 if (irq_sync && !err)
/* tg3_get_pauseparam(): ethtool .get_pauseparam — report pause autoneg
 * and the current RX/TX flow-control enables from link_config.
 */
11738 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11740 struct tg3 *tp = netdev_priv(dev);
11742 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11744 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11745 epause->rx_pause = 1;
11747 epause->rx_pause = 0;
11749 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11750 epause->tx_pause = 1;
11752 epause->tx_pause = 0;
/* tg3_set_pauseparam(): ethtool .set_pauseparam.  Two paths:
 *  - phylib: translate rx/tx pause into Pause/Asym_Pause advertising
 *    bits; if advertising changed and autoneg is on, renegotiate via
 *    phy_start_aneg() so the partner learns the new settings, even when
 *    flow control is forced.
 *  - native: stop the interface if running, update PAUSE_AUTONEG and the
 *    FLOW_CTRL_RX/TX bits under the full lock, then halt/restart the
 *    hardware.
 */
11755 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11757 struct tg3 *tp = netdev_priv(dev);
11760 if (tg3_flag(tp, USE_PHYLIB)) {
11762 struct phy_device *phydev;
11764 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Asymmetric pause requires the PHY to support Asym_Pause. */
11766 if (!(phydev->supported & SUPPORTED_Pause) ||
11767 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11768 (epause->rx_pause != epause->tx_pause)))
11771 tp->link_config.flowctrl = 0;
11772 if (epause->rx_pause) {
11773 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11775 if (epause->tx_pause) {
11776 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11777 newadv = ADVERTISED_Pause;
11779 newadv = ADVERTISED_Pause |
11780 ADVERTISED_Asym_Pause;
11781 } else if (epause->tx_pause) {
11782 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11783 newadv = ADVERTISED_Asym_Pause;
11787 if (epause->autoneg)
11788 tg3_flag_set(tp, PAUSE_AUTONEG);
11790 tg3_flag_clear(tp, PAUSE_AUTONEG);
11792 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11793 u32 oldadv = phydev->advertising &
11794 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11795 if (oldadv != newadv) {
11796 phydev->advertising &=
11797 ~(ADVERTISED_Pause |
11798 ADVERTISED_Asym_Pause);
11799 phydev->advertising |= newadv;
11800 if (phydev->autoneg) {
11802 * Always renegotiate the link to
11803 * inform our link partner of our
11804 * flow control settings, even if the
11805 * flow control is forced. Let
11806 * tg3_adjust_link() do the final
11807 * flow control setup.
11809 return phy_start_aneg(phydev);
11813 if (!epause->autoneg)
11814 tg3_setup_flow_control(tp, 0, 0);
11816 tp->link_config.advertising &=
11817 ~(ADVERTISED_Pause |
11818 ADVERTISED_Asym_Pause);
11819 tp->link_config.advertising |= newadv;
/* Non-phylib path from here down. */
11824 if (netif_running(dev)) {
11825 tg3_netif_stop(tp);
11829 tg3_full_lock(tp, irq_sync);
11831 if (epause->autoneg)
11832 tg3_flag_set(tp, PAUSE_AUTONEG);
11834 tg3_flag_clear(tp, PAUSE_AUTONEG);
11835 if (epause->rx_pause)
11836 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11838 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11839 if (epause->tx_pause)
11840 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11842 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11844 if (netif_running(dev)) {
11845 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11846 err = tg3_restart_hw(tp, 1);
11848 tg3_netif_start(tp);
11851 tg3_full_unlock(tp);
/* tg3_get_sset_count(): ethtool .get_sset_count hook — returns the number
 * of self-test entries or statistics strings for the requested string set.
 * NOTE(review): the `switch (sset)` header and its case labels
 * (presumably ETH_SS_TEST / ETH_SS_STATS) are elided from this excerpt.
 */
11857 static int tg3_get_sset_count(struct net_device *dev, int sset)
11861 return TG3_NUM_TEST;
11863 return TG3_NUM_STATS;
11865 return -EOPNOTSUPP;
/* tg3_get_rxnfc(): ethtool .get_rxnfc hook.
 * Only supported on MSI-X capable parts.  For ETHTOOL_GRXRINGS it reports
 * the active RX queue count when running, otherwise an estimate from
 * num_online_cpus() clamped to TG3_RSS_MAX_NUM_QS.
 */
11869 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11870 u32 *rules __always_unused)
11872 struct tg3 *tp = netdev_priv(dev);
11874 if (!tg3_flag(tp, SUPPORT_MSIX))
11875 return -EOPNOTSUPP;
11877 switch (info->cmd) {
11878 case ETHTOOL_GRXRINGS:
11879 if (netif_running(tp->dev))
11880 info->data = tp->rxq_cnt;
11882 info->data = num_online_cpus();
11883 if (info->data > TG3_RSS_MAX_NUM_QS)
11884 info->data = TG3_RSS_MAX_NUM_QS;
11887 /* The first interrupt vector only
11888 * handles link interrupts.
11894 return -EOPNOTSUPP;
/* tg3_get_rxfh_indir_size(): size of the RSS indirection table, or 0
 * when the device has no MSI-X (and therefore no RSS) support.
 * NOTE(review): the `size` declaration/return lines are elided here.
 */
11898 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11901 struct tg3 *tp = netdev_priv(dev);
11903 if (tg3_flag(tp, SUPPORT_MSIX))
11904 size = TG3_RSS_INDIR_TBL_SIZE;
/* tg3_get_rxfh_indir(): copy the driver's cached RSS indirection table
 * (tp->rss_ind_tbl) out to the caller-supplied array.
 */
11909 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11911 struct tg3 *tp = netdev_priv(dev);
11914 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11915 indir[i] = tp->rss_ind_tbl[i];
/* tg3_set_rxfh_indir(): store a new RSS indirection table and, if the
 * device is up with RSS enabled, write it to hardware under the full lock.
 */
11920 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11922 struct tg3 *tp = netdev_priv(dev);
11925 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11926 tp->rss_ind_tbl[i] = indir[i];
11928 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11931 /* It is legal to write the indirection
11932 * table while the device is running.
11934 tg3_full_lock(tp, 0);
11935 tg3_rss_write_indir_tbl(tp);
11936 tg3_full_unlock(tp);
/* tg3_get_channels(): ethtool .get_channels hook.
 * Max counts come from tp->rxq_max/txq_max.  Current counts are the live
 * queue counts when running; otherwise the user's previous request (if
 * any) or the system default RSS queue count, clamped to the maximum.
 * NOTE(review): the `else`/brace lines between 11955/11957 and
 * 11960/11962 are elided in this excerpt.
 */
11941 static void tg3_get_channels(struct net_device *dev,
11942 struct ethtool_channels *channel)
11944 struct tg3 *tp = netdev_priv(dev);
11945 u32 deflt_qs = netif_get_num_default_rss_queues();
11947 channel->max_rx = tp->rxq_max;
11948 channel->max_tx = tp->txq_max;
11950 if (netif_running(dev)) {
11951 channel->rx_count = tp->rxq_cnt;
11952 channel->tx_count = tp->txq_cnt;
11955 channel->rx_count = tp->rxq_req;
11957 channel->rx_count = min(deflt_qs, tp->rxq_max);
11960 channel->tx_count = tp->txq_req;
11962 channel->tx_count = min(deflt_qs, tp->txq_max);
/* tg3_set_channels(): ethtool .set_channels hook.
 * Validates the request against rxq_max/txq_max, records the requested
 * queue counts, and (when the interface is up) restarts the device via
 * tg3_start() to apply them.  Requires MSI-X support.
 */
11966 static int tg3_set_channels(struct net_device *dev,
11967 struct ethtool_channels *channel)
11969 struct tg3 *tp = netdev_priv(dev);
11971 if (!tg3_flag(tp, SUPPORT_MSIX))
11972 return -EOPNOTSUPP;
11974 if (channel->rx_count > tp->rxq_max ||
11975 channel->tx_count > tp->txq_max)
11978 tp->rxq_req = channel->rx_count;
11979 tp->txq_req = channel->tx_count;
11981 if (!netif_running(dev))
/* Take the link down, then restart with the new queue configuration. */
11986 tg3_carrier_off(tp);
11988 tg3_start(tp, true, false, false);
/* tg3_get_strings(): ethtool .get_strings hook — copies the stats or
 * self-test key tables into the caller's buffer based on stringset.
 * NOTE(review): the case labels around 11997/12000 are elided here.
 */
11993 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11995 switch (stringset) {
11997 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12000 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12003 WARN_ON(1); /* we need a WARN() */
/* tg3_set_phys_id(): ethtool .set_phys_id hook — blinks the port LEDs
 * for physical identification by overriding MAC_LED_CTRL, then restores
 * tp->led_ctrl when the identify cycle ends.  Returning 1 from
 * ETHTOOL_ID_ACTIVE asks the core to toggle once per second.
 */
12008 static int tg3_set_phys_id(struct net_device *dev,
12009 enum ethtool_phys_id_state state)
12011 struct tg3 *tp = netdev_priv(dev);
12013 if (!netif_running(tp->dev))
12017 case ETHTOOL_ID_ACTIVE:
12018 return 1; /* cycle on/off once per second */
12020 case ETHTOOL_ID_ON:
/* Force every speed LED on and blink the traffic LED. */
12021 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12022 LED_CTRL_1000MBPS_ON |
12023 LED_CTRL_100MBPS_ON |
12024 LED_CTRL_10MBPS_ON |
12025 LED_CTRL_TRAFFIC_OVERRIDE |
12026 LED_CTRL_TRAFFIC_BLINK |
12027 LED_CTRL_TRAFFIC_LED);
12030 case ETHTOOL_ID_OFF:
12031 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12032 LED_CTRL_TRAFFIC_OVERRIDE);
12035 case ETHTOOL_ID_INACTIVE:
/* Restore the normal LED configuration. */
12036 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* tg3_get_ethtool_stats(): ethtool .get_ethtool_stats hook — fills the
 * stats buffer via tg3_get_estats(), or zeroes it (presumably when the
 * hardware stats block is unavailable; the guarding condition between
 * 12049 and 12051 is elided in this excerpt — TODO confirm).
 */
12043 static void tg3_get_ethtool_stats(struct net_device *dev,
12044 struct ethtool_stats *estats, u64 *tmp_stats)
12046 struct tg3 *tp = netdev_priv(dev);
12049 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12051 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
/* tg3_vpd_readblock(): read the Vital Product Data block.
 * Locates an extended-VPD directory entry in NVRAM (TG3_NVM_DIRTYPE_EXTVPD)
 * when the EEPROM magic matches, falling back to the fixed VPD offset/len.
 * Reads via NVRAM big-endian accessors, or through PCI config space
 * (pci_read_vpd) when there is no usable NVRAM image.
 * Returns a kmalloc'd buffer (caller frees) and the length via *vpdlen.
 * NOTE(review): the function's tail (error cleanup / return) is elided
 * from this excerpt.
 */
12054 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12058 u32 offset = 0, len = 0;
12061 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
/* Scan the NVRAM directory for an extended-VPD entry. */
12064 if (magic == TG3_EEPROM_MAGIC) {
12065 for (offset = TG3_NVM_DIR_START;
12066 offset < TG3_NVM_DIR_END;
12067 offset += TG3_NVM_DIRENT_SIZE) {
12068 if (tg3_nvram_read(tp, offset, &val))
12071 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12072 TG3_NVM_DIRTYPE_EXTVPD)
12076 if (offset != TG3_NVM_DIR_END) {
12077 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12078 if (tg3_nvram_read(tp, offset + 4, &offset))
12081 offset = tg3_nvram_logical_addr(tp, offset)
/* No directory entry found — use the default VPD location. */
12085 if (!offset || !len) {
12086 offset = TG3_NVM_VPD_OFF;
12087 len = TG3_NVM_VPD_LEN;
12090 buf = kmalloc(len, GFP_KERNEL);
12094 if (magic == TG3_EEPROM_MAGIC) {
12095 for (i = 0; i < len; i += 4) {
12096 /* The data is in little-endian format in NVRAM.
12097 * Use the big-endian read routines to preserve
12098 * the byte order as it exists in NVRAM.
12100 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
/* No NVRAM image: fall back to PCI VPD capability reads. */
12106 unsigned int pos = 0;
12108 ptr = (u8 *)&buf[0];
12109 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12110 cnt = pci_read_vpd(tp->pdev, pos,
12112 if (cnt == -ETIMEDOUT || cnt == -EINTR)
/* NVRAM image sizes for the various selfboot formats and the plain
 * EEPROM test image, used by tg3_test_nvram() below. */
12130 #define NVRAM_TEST_SIZE 0x100
12131 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12132 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12133 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12134 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12135 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12136 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12137 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12138 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* tg3_test_nvram(): ethtool self-test of the NVRAM contents.
 * Determines the image format from the magic word, reads the image,
 * then verifies: an 8-bit checksum for selfboot FW images, per-byte
 * parity for selfboot HW images, CRCs for the bootstrap/manufacturing
 * blocks of plain EEPROM images, and finally the VPD RO-section
 * checksum keyword.  Returns 0 on success, negative on failure.
 * NOTE(review): decimated listing — breaks, braces, gotos and the
 * cleanup tail are elided between the numbered lines.
 */
12140 static int tg3_test_nvram(struct tg3 *tp)
12142 u32 csum, magic, len;
12144 int i, j, k, err = 0, size;
12146 if (tg3_flag(tp, NO_NVRAM))
12149 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Choose the image size based on magic / selfboot format revision. */
12152 if (magic == TG3_EEPROM_MAGIC)
12153 size = NVRAM_TEST_SIZE;
12154 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12155 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12156 TG3_EEPROM_SB_FORMAT_1) {
12157 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12158 case TG3_EEPROM_SB_REVISION_0:
12159 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12161 case TG3_EEPROM_SB_REVISION_2:
12162 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12164 case TG3_EEPROM_SB_REVISION_3:
12165 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12167 case TG3_EEPROM_SB_REVISION_4:
12168 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12170 case TG3_EEPROM_SB_REVISION_5:
12171 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12173 case TG3_EEPROM_SB_REVISION_6:
12174 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12181 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12182 size = NVRAM_SELFBOOT_HW_SIZE;
12186 buf = kmalloc(size, GFP_KERNEL);
/* Read the whole image as big-endian 32-bit words. */
12191 for (i = 0, j = 0; i < size; i += 4, j++) {
12192 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12199 /* Selfboot format */
12200 magic = be32_to_cpu(buf[0]);
12201 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12202 TG3_EEPROM_MAGIC_FW) {
12203 u8 *buf8 = (u8 *) buf, csum8 = 0;
12205 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12206 TG3_EEPROM_SB_REVISION_2) {
12207 /* For rev 2, the csum doesn't include the MBA. */
12208 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12210 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12213 for (i = 0; i < size; i++)
/* Selfboot HW format: verify odd parity over the data bytes. */
12226 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12227 TG3_EEPROM_MAGIC_HW) {
12228 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12229 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12230 u8 *buf8 = (u8 *) buf;
12232 /* Separate the parity bits and the data bytes. */
12233 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12234 if ((i == 0) || (i == 8)) {
12238 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12239 parity[k++] = buf8[i] & msk;
12241 } else if (i == 16) {
12245 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12246 parity[k++] = buf8[i] & msk;
12249 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12250 parity[k++] = buf8[i] & msk;
12253 data[j++] = buf8[i];
12257 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12258 u8 hw8 = hweight8(data[i]);
12260 if ((hw8 & 0x1) && parity[i])
12262 else if (!(hw8 & 0x1) && !parity[i])
/* Plain EEPROM image: CRC-check the bootstrap and mfg blocks. */
12271 /* Bootstrap checksum at offset 0x10 */
12272 csum = calc_crc((unsigned char *) buf, 0x10);
12273 if (csum != le32_to_cpu(buf[0x10/4]))
12276 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12277 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12278 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally validate the VPD read-only section's CHKSUM keyword. */
12283 buf = tg3_vpd_readblock(tp, &len);
12287 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12289 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12293 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12296 i += PCI_VPD_LRDT_TAG_SIZE;
12297 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12298 PCI_VPD_RO_KEYWORD_CHKSUM);
12302 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12304 for (i = 0; i <= j; i++)
12305 csum8 += ((u8 *)buf)[i];
/* Link-up timeouts (seconds) for the ethtool link self-test. */
12319 #define TG3_SERDES_TIMEOUT_SEC 2
12320 #define TG3_COPPER_TIMEOUT_SEC 6
/* tg3_test_link(): poll for link-up once per second, up to the
 * PHY-type-specific timeout.  msleep_interruptible() aborts early on
 * signal delivery.  NOTE(review): the per-iteration link check between
 * 12334 and 12338 is elided from this excerpt.
 */
12322 static int tg3_test_link(struct tg3 *tp)
12326 if (!netif_running(tp->dev))
12329 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12330 max = TG3_SERDES_TIMEOUT_SEC;
12332 max = TG3_COPPER_TIMEOUT_SEC;
12334 for (i = 0; i < max; i++) {
12338 if (msleep_interruptible(1000))
12345 /* Only test the commonly used registers */
/* tg3_test_registers(): ethtool register self-test.
 * Walks reg_tbl[], skipping entries that don't apply to the current ASIC
 * (5705/5750/5788 family flags).  For each register: save the original
 * value, write 0 and then (read_mask | write_mask), checking that
 * read-only bits stay fixed and read/write bits take the written value;
 * the original value is restored in both the pass and fail paths.
 */
12346 static int tg3_test_registers(struct tg3 *tp)
12348 int i, is_5705, is_5750;
12349 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags for reg_tbl[]. */
12353 #define TG3_FL_5705 0x1
12354 #define TG3_FL_NOT_5705 0x2
12355 #define TG3_FL_NOT_5788 0x4
12356 #define TG3_FL_NOT_5750 0x8
/* Table of { offset, flags, read_mask, write_mask }; 0xffff terminates. */
12360 /* MAC Control Registers */
12361 { MAC_MODE, TG3_FL_NOT_5705,
12362 0x00000000, 0x00ef6f8c },
12363 { MAC_MODE, TG3_FL_5705,
12364 0x00000000, 0x01ef6b8c },
12365 { MAC_STATUS, TG3_FL_NOT_5705,
12366 0x03800107, 0x00000000 },
12367 { MAC_STATUS, TG3_FL_5705,
12368 0x03800100, 0x00000000 },
12369 { MAC_ADDR_0_HIGH, 0x0000,
12370 0x00000000, 0x0000ffff },
12371 { MAC_ADDR_0_LOW, 0x0000,
12372 0x00000000, 0xffffffff },
12373 { MAC_RX_MTU_SIZE, 0x0000,
12374 0x00000000, 0x0000ffff },
12375 { MAC_TX_MODE, 0x0000,
12376 0x00000000, 0x00000070 },
12377 { MAC_TX_LENGTHS, 0x0000,
12378 0x00000000, 0x00003fff },
12379 { MAC_RX_MODE, TG3_FL_NOT_5705,
12380 0x00000000, 0x000007fc },
12381 { MAC_RX_MODE, TG3_FL_5705,
12382 0x00000000, 0x000007dc },
12383 { MAC_HASH_REG_0, 0x0000,
12384 0x00000000, 0xffffffff },
12385 { MAC_HASH_REG_1, 0x0000,
12386 0x00000000, 0xffffffff },
12387 { MAC_HASH_REG_2, 0x0000,
12388 0x00000000, 0xffffffff },
12389 { MAC_HASH_REG_3, 0x0000,
12390 0x00000000, 0xffffffff },
12392 /* Receive Data and Receive BD Initiator Control Registers. */
12393 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12394 0x00000000, 0xffffffff },
12395 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12396 0x00000000, 0xffffffff },
12397 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12398 0x00000000, 0x00000003 },
12399 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12400 0x00000000, 0xffffffff },
12401 { RCVDBDI_STD_BD+0, 0x0000,
12402 0x00000000, 0xffffffff },
12403 { RCVDBDI_STD_BD+4, 0x0000,
12404 0x00000000, 0xffffffff },
12405 { RCVDBDI_STD_BD+8, 0x0000,
12406 0x00000000, 0xffff0002 },
12407 { RCVDBDI_STD_BD+0xc, 0x0000,
12408 0x00000000, 0xffffffff },
12410 /* Receive BD Initiator Control Registers. */
12411 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12412 0x00000000, 0xffffffff },
12413 { RCVBDI_STD_THRESH, TG3_FL_5705,
12414 0x00000000, 0x000003ff },
12415 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12416 0x00000000, 0xffffffff },
12418 /* Host Coalescing Control Registers. */
12419 { HOSTCC_MODE, TG3_FL_NOT_5705,
12420 0x00000000, 0x00000004 },
12421 { HOSTCC_MODE, TG3_FL_5705,
12422 0x00000000, 0x000000f6 },
12423 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12424 0x00000000, 0xffffffff },
12425 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12426 0x00000000, 0x000003ff },
12427 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12428 0x00000000, 0xffffffff },
12429 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12430 0x00000000, 0x000003ff },
12431 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12432 0x00000000, 0xffffffff },
12433 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12434 0x00000000, 0x000000ff },
12435 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12436 0x00000000, 0xffffffff },
12437 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12438 0x00000000, 0x000000ff },
12439 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12440 0x00000000, 0xffffffff },
12441 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12442 0x00000000, 0xffffffff },
12443 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12444 0x00000000, 0xffffffff },
12445 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12446 0x00000000, 0x000000ff },
12447 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12448 0x00000000, 0xffffffff },
12449 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12450 0x00000000, 0x000000ff },
12451 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12452 0x00000000, 0xffffffff },
12453 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12454 0x00000000, 0xffffffff },
12455 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12456 0x00000000, 0xffffffff },
12457 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12458 0x00000000, 0xffffffff },
12459 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12460 0x00000000, 0xffffffff },
12461 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12462 0xffffffff, 0x00000000 },
12463 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12464 0xffffffff, 0x00000000 },
12466 /* Buffer Manager Control Registers. */
12467 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12468 0x00000000, 0x007fff80 },
12469 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12470 0x00000000, 0x007fffff },
12471 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12472 0x00000000, 0x0000003f },
12473 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12474 0x00000000, 0x000001ff },
12475 { BUFMGR_MB_HIGH_WATER, 0x0000,
12476 0x00000000, 0x000001ff },
12477 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12478 0xffffffff, 0x00000000 },
12479 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12480 0xffffffff, 0x00000000 },
12482 /* Mailbox Registers */
12483 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12484 0x00000000, 0x000001ff },
12485 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12486 0x00000000, 0x000001ff },
12487 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12488 0x00000000, 0x000007ff },
12489 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12490 0x00000000, 0x000001ff },
12492 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
/* Classify the ASIC once so table entries can be filtered cheaply. */
12495 is_5705 = is_5750 = 0;
12496 if (tg3_flag(tp, 5705_PLUS)) {
12498 if (tg3_flag(tp, 5750_PLUS))
12502 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12503 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12506 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12509 if (tg3_flag(tp, IS_5788) &&
12510 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12513 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12516 offset = (u32) reg_tbl[i].offset;
12517 read_mask = reg_tbl[i].read_mask;
12518 write_mask = reg_tbl[i].write_mask;
12520 /* Save the original register content */
12521 save_val = tr32(offset);
12523 /* Determine the read-only value. */
12524 read_val = save_val & read_mask;
12526 /* Write zero to the register, then make sure the read-only bits
12527 * are not changed and the read/write bits are all zeros.
12531 val = tr32(offset);
12533 /* Test the read-only and read/write bits. */
12534 if (((val & read_mask) != read_val) || (val & write_mask))
12537 /* Write ones to all the bits defined by RdMask and WrMask, then
12538 * make sure the read-only bits are not changed and the
12539 * read/write bits are all ones.
12541 tw32(offset, read_mask | write_mask);
12543 val = tr32(offset);
12545 /* Test the read-only bits. */
12546 if ((val & read_mask) != read_val)
12549 /* Test the read/write bits. */
12550 if ((val & write_mask) != write_mask)
/* Restore the register and continue with the next entry. */
12553 tw32(offset, save_val);
/* Failure path: log the offending offset, restore, return error. */
12559 if (netif_msg_hw(tp))
12560 netdev_err(tp->dev,
12561 "Register test failed at offset %x\n", offset);
12562 tw32(offset, save_val);
/* tg3_do_mem_test(): write each of three test patterns (all-0, all-1,
 * 0xaa55a55a) across [offset, offset+len) in device memory, reading each
 * word back to verify it sticks.  Returns nonzero on mismatch (the error
 * return between 12578 and the loop end is elided in this excerpt).
 */
12566 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12568 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12572 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12573 for (j = 0; j < len; j += 4) {
12576 tg3_write_mem(tp, offset + j, test_pattern[i]);
12577 tg3_read_mem(tp, offset + j, &val);
12578 if (val != test_pattern[i])
/* tg3_test_memory(): ethtool internal-memory self-test.
 * Selects the { offset, len } region table matching the chip family
 * (5717+, 57765-class/5762, 5755+, 5906, 5705+, or original 570x) and
 * runs tg3_do_mem_test() over each region until the 0xffffffff sentinel.
 */
12585 static int tg3_test_memory(struct tg3 *tp)
12587 static struct mem_entry {
12590 } mem_tbl_570x[] = {
12591 { 0x00000000, 0x00b50},
12592 { 0x00002000, 0x1c000},
12593 { 0xffffffff, 0x00000}
12594 }, mem_tbl_5705[] = {
12595 { 0x00000100, 0x0000c},
12596 { 0x00000200, 0x00008},
12597 { 0x00004000, 0x00800},
12598 { 0x00006000, 0x01000},
12599 { 0x00008000, 0x02000},
12600 { 0x00010000, 0x0e000},
12601 { 0xffffffff, 0x00000}
12602 }, mem_tbl_5755[] = {
12603 { 0x00000200, 0x00008},
12604 { 0x00004000, 0x00800},
12605 { 0x00006000, 0x00800},
12606 { 0x00008000, 0x02000},
12607 { 0x00010000, 0x0c000},
12608 { 0xffffffff, 0x00000}
12609 }, mem_tbl_5906[] = {
12610 { 0x00000200, 0x00008},
12611 { 0x00004000, 0x00400},
12612 { 0x00006000, 0x00400},
12613 { 0x00008000, 0x01000},
12614 { 0x00010000, 0x01000},
12615 { 0xffffffff, 0x00000}
12616 }, mem_tbl_5717[] = {
12617 { 0x00000200, 0x00008},
12618 { 0x00010000, 0x0a000},
12619 { 0x00020000, 0x13c00},
12620 { 0xffffffff, 0x00000}
12621 }, mem_tbl_57765[] = {
12622 { 0x00000200, 0x00008},
12623 { 0x00004000, 0x00800},
12624 { 0x00006000, 0x09800},
12625 { 0x00010000, 0x0a000},
12626 { 0xffffffff, 0x00000}
12628 struct mem_entry *mem_tbl;
/* Pick the table for this ASIC; later checks are more specific chips. */
12632 if (tg3_flag(tp, 5717_PLUS))
12633 mem_tbl = mem_tbl_5717;
12634 else if (tg3_flag(tp, 57765_CLASS) ||
12635 tg3_asic_rev(tp) == ASIC_REV_5762)
12636 mem_tbl = mem_tbl_57765;
12637 else if (tg3_flag(tp, 5755_PLUS))
12638 mem_tbl = mem_tbl_5755;
12639 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12640 mem_tbl = mem_tbl_5906;
12641 else if (tg3_flag(tp, 5705_PLUS))
12642 mem_tbl = mem_tbl_5705;
12644 mem_tbl = mem_tbl_570x;
12646 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12647 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
/* TSO loopback test parameters: segment size and header lengths used to
 * build the canned IPv4+TCP header below. */
12655 #define TG3_TSO_MSS 500
12657 #define TG3_TSO_IP_HDR_LEN 20
12658 #define TG3_TSO_TCP_HDR_LEN 20
12659 #define TG3_TSO_TCP_OPT_LEN 12
/* Canned IPv4 header (20 bytes) followed by a TCP header with a 12-byte
 * timestamp option, used as the template packet for TSO loopback tests.
 * Addresses are 10.0.0.1 -> 10.0.0.2; lengths/checksums are patched at
 * run time by tg3_run_loopback(). */
12661 static const u8 tg3_tso_header[] = {
12663 0x45, 0x00, 0x00, 0x00,
12664 0x00, 0x00, 0x40, 0x00,
12665 0x40, 0x06, 0x00, 0x00,
12666 0x0a, 0x00, 0x00, 0x01,
12667 0x0a, 0x00, 0x00, 0x02,
12668 0x0d, 0x00, 0xe0, 0x00,
12669 0x00, 0x00, 0x01, 0x00,
12670 0x00, 0x00, 0x02, 0x00,
12671 0x80, 0x10, 0x10, 0x00,
12672 0x14, 0x09, 0x00, 0x00,
12673 0x01, 0x01, 0x08, 0x0a,
12674 0x11, 0x11, 0x11, 0x11,
12675 0x11, 0x11, 0x11, 0x11,
/* tg3_run_loopback(): transmit one test packet of size pktsz and verify
 * it arrives intact on the receive side (the device is assumed to be in
 * a loopback mode set up by the caller).  When tso_loopback is true a
 * TSO super-packet is built from tg3_tso_header and the per-HW-TSO-
 * generation mss/flag encoding is applied.  Payload bytes are a
 * position-derived ramp so corruption is detectable.  Returns 0 when the
 * expected number of packets arrives with matching length and payload.
 * NOTE(review): decimated listing — error returns and some assignments
 * between the numbered lines are elided.
 */
12678 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12680 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12681 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12683 struct sk_buff *skb;
12684 u8 *tx_data, *rx_data;
12686 int num_pkts, tx_len, rx_len, i, err;
12687 struct tg3_rx_buffer_desc *desc;
12688 struct tg3_napi *tnapi, *rnapi;
12689 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* With RSS/TSS the test queues live on vector 1; vector 0 is link-only. */
12691 tnapi = &tp->napi[0];
12692 rnapi = &tp->napi[0];
12693 if (tp->irq_cnt > 1) {
12694 if (tg3_flag(tp, ENABLE_RSS))
12695 rnapi = &tp->napi[1];
12696 if (tg3_flag(tp, ENABLE_TSS))
12697 tnapi = &tp->napi[1];
12699 coal_now = tnapi->coal_now | rnapi->coal_now;
/* Build the test frame: dest MAC = our own address, zero src. */
12704 skb = netdev_alloc_skb(tp->dev, tx_len);
12708 tx_data = skb_put(skb, tx_len);
12709 memcpy(tx_data, tp->dev->dev_addr, 6);
12710 memset(tx_data + 6, 0x0, 8);
12712 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12714 if (tso_loopback) {
12715 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12717 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12718 TG3_TSO_TCP_OPT_LEN;
12720 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12721 sizeof(tg3_tso_header));
12724 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12725 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12727 /* Set the total length field in the IP header */
12728 iph->tot_len = htons((u16)(mss + hdr_len));
12730 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12731 TXD_FLAG_CPU_POST_DMA);
/* Encode mss/header length per the hardware TSO generation. */
12733 if (tg3_flag(tp, HW_TSO_1) ||
12734 tg3_flag(tp, HW_TSO_2) ||
12735 tg3_flag(tp, HW_TSO_3)) {
12737 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12738 th = (struct tcphdr *)&tx_data[val];
12741 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12743 if (tg3_flag(tp, HW_TSO_3)) {
12744 mss |= (hdr_len & 0xc) << 12;
12745 if (hdr_len & 0x10)
12746 base_flags |= 0x00000010;
12747 base_flags |= (hdr_len & 0x3e0) << 5;
12748 } else if (tg3_flag(tp, HW_TSO_2))
12749 mss |= hdr_len << 9;
12750 else if (tg3_flag(tp, HW_TSO_1) ||
12751 tg3_asic_rev(tp) == ASIC_REV_5705) {
12752 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12754 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12757 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12760 data_off = ETH_HLEN;
12762 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12763 tx_len > VLAN_ETH_FRAME_LEN)
12764 base_flags |= TXD_FLAG_JMB_PKT;
/* Fill the payload with a recognizable byte ramp. */
12767 for (i = data_off; i < tx_len; i++)
12768 tx_data[i] = (u8) (i & 0xff);
12770 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12771 if (pci_dma_mapping_error(tp->pdev, map)) {
12772 dev_kfree_skb(skb);
/* Queue the descriptor and kick the TX mailbox. */
12776 val = tnapi->tx_prod;
12777 tnapi->tx_buffers[val].skb = skb;
12778 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12780 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12785 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12787 budget = tg3_tx_avail(tnapi);
12788 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12789 base_flags | TXD_FLAG_END, mss, 0)) {
12790 tnapi->tx_buffers[val].skb = NULL;
12791 dev_kfree_skb(skb);
12797 /* Sync BD data before updating mailbox */
12800 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12801 tr32_mailbox(tnapi->prodmbox);
12805 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12806 for (i = 0; i < 35; i++) {
12807 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12812 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12813 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12814 if ((tx_idx == tnapi->tx_prod) &&
12815 (rx_idx == (rx_start_idx + num_pkts)))
12819 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12820 dev_kfree_skb(skb);
12822 if (tx_idx != tnapi->tx_prod)
12825 if (rx_idx != rx_start_idx + num_pkts)
/* Validate every received descriptor: errors, length, ring, payload. */
12829 while (rx_idx != rx_start_idx) {
12830 desc = &rnapi->rx_rcb[rx_start_idx++];
12831 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12832 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12834 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12835 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12838 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12841 if (!tso_loopback) {
12842 if (rx_len != tx_len)
12845 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12846 if (opaque_key != RXD_OPAQUE_RING_STD)
12849 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12852 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12853 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12854 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12858 if (opaque_key == RXD_OPAQUE_RING_STD) {
12859 rx_data = tpr->rx_std_buffers[desc_idx].data;
12860 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12862 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12863 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12864 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12869 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12870 PCI_DMA_FROMDEVICE);
12872 rx_data += TG3_RX_OFFSET(tp);
12873 for (i = data_off; i < rx_len; i++, val++) {
12874 if (*(rx_data + i) != (u8) (val & 0xff))
12881 /* tg3_free_rings will unmap and free the rx_data */
/* Per-packet-type failure bits reported in the loopback test results. */
12886 #define TG3_STD_LOOPBACK_FAILED 1
12887 #define TG3_JMB_LOOPBACK_FAILED 2
12888 #define TG3_TSO_LOOPBACK_FAILED 4
12889 #define TG3_LOOPBACK_FAILED \
12890 (TG3_STD_LOOPBACK_FAILED | \
12891 TG3_JMB_LOOPBACK_FAILED | \
12892 TG3_TSO_LOOPBACK_FAILED)
/* tg3_test_loopback(): run MAC, internal-PHY, and (optionally) external
 * loopback tests, accumulating TG3_*_LOOPBACK_FAILED bits into data[].
 * EEE is temporarily masked off (it can interfere with loopback) and
 * restored on exit; with RSS enabled all RX traffic is rerouted to
 * queue 1 first.  Returns -EIO if any sub-test failed.
 */
12894 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12898 u32 jmb_pkt_sz = 9000;
12901 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12903 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12904 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12906 if (!netif_running(tp->dev)) {
12907 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12908 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12910 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12914 err = tg3_reset_hw(tp, 1);
12916 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12917 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12919 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12923 if (tg3_flag(tp, ENABLE_RSS)) {
12926 /* Reroute all rx packets to the 1st queue */
12927 for (i = MAC_RSS_INDIR_TBL_0;
12928 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12932 /* HW errata - mac loopback fails in some cases on 5780.
12933 * Normal traffic and PHY loopback are not affected by
12934 * errata. Also, the MAC loopback test is deprecated for
12935 * all newer ASIC revisions.
12937 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12938 !tg3_flag(tp, CPMU_PRESENT)) {
12939 tg3_mac_loopback(tp, true);
12941 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12942 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12944 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12945 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12946 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12948 tg3_mac_loopback(tp, false);
/* Internal PHY loopback (not for serdes or phylib-managed PHYs). */
12951 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12952 !tg3_flag(tp, USE_PHYLIB)) {
12955 tg3_phy_lpbk_set(tp, 0, false);
12957 /* Wait for link */
12958 for (i = 0; i < 100; i++) {
12959 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12964 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12965 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12966 if (tg3_flag(tp, TSO_CAPABLE) &&
12967 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12968 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12969 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12970 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12971 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
/* External loopback (requires a loopback plug; do_extlpbk). */
12974 tg3_phy_lpbk_set(tp, 0, true);
12976 /* All link indications report up, but the hardware
12977 * isn't really ready for about 20 msec. Double it
12982 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12983 data[TG3_EXT_LOOPB_TEST] |=
12984 TG3_STD_LOOPBACK_FAILED;
12985 if (tg3_flag(tp, TSO_CAPABLE) &&
12986 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12987 data[TG3_EXT_LOOPB_TEST] |=
12988 TG3_TSO_LOOPBACK_FAILED;
12989 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12990 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12991 data[TG3_EXT_LOOPB_TEST] |=
12992 TG3_JMB_LOOPBACK_FAILED;
12995 /* Re-enable gphy autopowerdown. */
12996 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12997 tg3_phy_toggle_apd(tp, true);
13000 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13001 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
/* Restore the EEE capability flag masked off at entry. */
13004 tp->phy_flags |= eee_cap;
/* tg3_self_test(): ethtool .self_test hook — runs the NVRAM, link,
 * register, memory, loopback, and interrupt tests, setting per-test
 * entries in data[] and ETH_TEST_FL_FAILED in etest->flags on failure.
 * Offline tests halt the chip (and its CPUs) under the full lock, then
 * restart the hardware afterwards.  A device in low-power state is
 * powered up first and powered back down at the end.
 */
13009 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13012 struct tg3 *tp = netdev_priv(dev);
13013 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13015 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13016 tg3_power_up(tp)) {
13017 etest->flags |= ETH_TEST_FL_FAILED;
/* Power-up failed: mark every test as failed and bail. */
13018 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13022 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13024 if (tg3_test_nvram(tp) != 0) {
13025 etest->flags |= ETH_TEST_FL_FAILED;
13026 data[TG3_NVRAM_TEST] = 1;
13028 if (!doextlpbk && tg3_test_link(tp)) {
13029 etest->flags |= ETH_TEST_FL_FAILED;
13030 data[TG3_LINK_TEST] = 1;
/* Offline portion: requires quiescing and halting the device. */
13032 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13033 int err, err2 = 0, irq_sync = 0;
13035 if (netif_running(dev)) {
13037 tg3_netif_stop(tp);
13041 tg3_full_lock(tp, irq_sync);
13042 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13043 err = tg3_nvram_lock(tp);
13044 tg3_halt_cpu(tp, RX_CPU_BASE);
13045 if (!tg3_flag(tp, 5705_PLUS))
13046 tg3_halt_cpu(tp, TX_CPU_BASE);
13048 tg3_nvram_unlock(tp);
13050 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13053 if (tg3_test_registers(tp) != 0) {
13054 etest->flags |= ETH_TEST_FL_FAILED;
13055 data[TG3_REGISTER_TEST] = 1;
13058 if (tg3_test_memory(tp) != 0) {
13059 etest->flags |= ETH_TEST_FL_FAILED;
13060 data[TG3_MEMORY_TEST] = 1;
13064 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13066 if (tg3_test_loopback(tp, data, doextlpbk))
13067 etest->flags |= ETH_TEST_FL_FAILED;
13069 tg3_full_unlock(tp);
13071 if (tg3_test_interrupt(tp) != 0) {
13072 etest->flags |= ETH_TEST_FL_FAILED;
13073 data[TG3_INTERRUPT_TEST] = 1;
/* Restart the NIC after the destructive offline tests. */
13076 tg3_full_lock(tp, 0);
13078 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13079 if (netif_running(dev)) {
13080 tg3_flag_set(tp, INIT_COMPLETE);
13081 err2 = tg3_restart_hw(tp, 1);
13083 tg3_netif_start(tp);
13086 tg3_full_unlock(tp);
13088 if (irq_sync && !err2)
13091 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13092 tg3_power_down(tp);
/* tg3_hwtstamp_ioctl(): SIOCSHWTSTAMP handler.
 * Copies a struct hwtstamp_config from userspace, toggles the
 * TX_TSTAMP_EN flag per tx_type, maps each supported rx_filter to the
 * corresponding TG3_RX_PTP_CTL_* bits in tp->rxptpctl, programs the
 * hardware when running, and copies the (possibly adjusted) config back
 * to userspace.  Requires PTP_CAPABLE hardware; stmpconf.flags must be 0.
 * NOTE(review): decimated listing — `break`s and some error returns
 * between the numbered lines are elided.
 */
13096 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13097 struct ifreq *ifr, int cmd)
13099 struct tg3 *tp = netdev_priv(dev);
13100 struct hwtstamp_config stmpconf;
13102 if (!tg3_flag(tp, PTP_CAPABLE))
13105 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13108 if (stmpconf.flags)
13111 switch (stmpconf.tx_type) {
13112 case HWTSTAMP_TX_ON:
13113 tg3_flag_set(tp, TX_TSTAMP_EN);
13115 case HWTSTAMP_TX_OFF:
13116 tg3_flag_clear(tp, TX_TSTAMP_EN);
/* Map the requested RX filter onto the hardware PTP control bits. */
13122 switch (stmpconf.rx_filter) {
13123 case HWTSTAMP_FILTER_NONE:
13126 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13127 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13128 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13130 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13131 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13132 TG3_RX_PTP_CTL_SYNC_EVNT;
13134 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13135 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13136 TG3_RX_PTP_CTL_DELAY_REQ;
13138 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13139 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13140 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13142 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13143 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13144 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13146 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13147 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13148 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13150 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13151 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13152 TG3_RX_PTP_CTL_SYNC_EVNT;
13154 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13155 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13156 TG3_RX_PTP_CTL_SYNC_EVNT;
13158 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13159 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13160 TG3_RX_PTP_CTL_SYNC_EVNT;
13162 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13163 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13164 TG3_RX_PTP_CTL_DELAY_REQ;
13166 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13167 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13168 TG3_RX_PTP_CTL_DELAY_REQ;
13170 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13171 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13172 TG3_RX_PTP_CTL_DELAY_REQ;
/* Program the hardware only when up and a filter is selected. */
13178 if (netif_running(dev) && tp->rxptpctl)
13179 tw32(TG3_RX_PTP_CTL,
13180 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13182 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/*
 * tg3_ioctl() - ndo_do_ioctl handler for MII register access and
 * hardware timestamping configuration.
 *
 * When the PHY is managed by phylib, all MII ioctls are forwarded to
 * phy_mii_ioctl().  Otherwise the driver services the MII read/write
 * requests itself under tp->lock.  SIOCSHWTSTAMP is delegated to
 * tg3_hwtstamp_ioctl().  Unrecognized commands return -EOPNOTSUPP.
 *
 * NOTE(review): this text looks like a lossy extraction -- the
 * switch (cmd) line, several case labels, break/return statements and
 * closing braces are not visible; verify against upstream tg3.c.
 */
13186 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13188 struct mii_ioctl_data *data = if_mii(ifr);
13189 struct tg3 *tp = netdev_priv(dev);
/* phylib owns the PHY: hand the ioctl straight to the PHY layer. */
13192 if (tg3_flag(tp, USE_PHYLIB)) {
13193 struct phy_device *phydev;
13194 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13196 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13197 return phy_mii_ioctl(phydev, ifr, cmd);
/* Report our PHY address (presumably the SIOCGMIIPHY case --
 * the case label itself is not visible in this extraction). */
13202 data->phy_id = tp->phy_addr;
13205 case SIOCGMIIREG: {
/* Serdes devices have no MII-accessible copper PHY. */
13208 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13209 break; /* We have no PHY */
13211 if (!netif_running(dev))
/* Read the requested PHY register under the driver lock. */
13214 spin_lock_bh(&tp->lock);
13215 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13216 data->reg_num & 0x1f, &mii_regval);
13217 spin_unlock_bh(&tp->lock);
13219 data->val_out = mii_regval;
/* Write path (presumably SIOCSMIIREG -- label not visible). */
13225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13226 break; /* We have no PHY */
13228 if (!netif_running(dev))
13231 spin_lock_bh(&tp->lock);
13232 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13233 data->reg_num & 0x1f, data->val_in);
13234 spin_unlock_bh(&tp->lock);
/* Hardware timestamp configuration is handled separately. */
13238 case SIOCSHWTSTAMP:
13239 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
/* Anything else is not supported by this driver. */
13245 return -EOPNOTSUPP;
/*
 * tg3_get_coalesce() - ethtool get_coalesce hook.
 * Copies the driver's cached coalescing parameters (tp->coal) out to
 * the caller-supplied structure.
 */
13248 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13250 struct tg3 *tp = netdev_priv(dev);
13252 memcpy(ec, &tp->coal, sizeof(*ec));
/*
 * tg3_set_coalesce() - ethtool set_coalesce hook.
 *
 * Validates the requested interrupt-coalescing parameters against the
 * chip's limits, copies the supported subset into tp->coal, and pushes
 * the new values to hardware if the interface is running.
 *
 * NOTE(review): extraction has dropped lines here (the early-return
 * error statements after the validation checks, closing braces and the
 * final return); verify against upstream tg3.c.
 */
13256 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13258 struct tg3 *tp = netdev_priv(dev);
13259 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13260 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* IRQ-tick and stats-block coalescing limits only apply to pre-5705
 * chips; on 5705+ the limits stay zero, which forbids those fields. */
13262 if (!tg3_flag(tp, 5705_PLUS)) {
13263 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13264 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13265 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13266 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Reject any parameter outside the hardware's supported range. */
13269 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13270 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13271 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13272 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13273 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13274 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13275 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13276 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13277 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13278 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13281 /* No rx interrupts will be generated if both are zero */
13282 if ((ec->rx_coalesce_usecs == 0) &&
13283 (ec->rx_max_coalesced_frames == 0))
13286 /* No tx interrupts will be generated if both are zero */
13287 if ((ec->tx_coalesce_usecs == 0) &&
13288 (ec->tx_max_coalesced_frames == 0))
13291 /* Only copy relevant parameters, ignore all others. */
13292 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13293 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13294 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13295 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13296 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13297 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13298 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13299 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13300 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Apply immediately if the device is up; locking via tg3_full_lock. */
13302 if (netif_running(dev)) {
13303 tg3_full_lock(tp, 0);
13304 __tg3_set_coalesce(tp, &tp->coal);
13305 tg3_full_unlock(tp);
/*
 * ethtool operations table for the tg3 driver: wires the standard
 * ethtool entry points (settings, EEPROM, ring/pause parameters,
 * self-test, statistics, coalescing, RX flow hashing, channels and
 * timestamping info) to their tg3_* implementations.
 */
13310 static const struct ethtool_ops tg3_ethtool_ops = {
13311 .get_settings = tg3_get_settings,
13312 .set_settings = tg3_set_settings,
13313 .get_drvinfo = tg3_get_drvinfo,
13314 .get_regs_len = tg3_get_regs_len,
13315 .get_regs = tg3_get_regs,
13316 .get_wol = tg3_get_wol,
13317 .set_wol = tg3_set_wol,
13318 .get_msglevel = tg3_get_msglevel,
13319 .set_msglevel = tg3_set_msglevel,
13320 .nway_reset = tg3_nway_reset,
13321 .get_link = ethtool_op_get_link,
13322 .get_eeprom_len = tg3_get_eeprom_len,
13323 .get_eeprom = tg3_get_eeprom,
13324 .set_eeprom = tg3_set_eeprom,
13325 .get_ringparam = tg3_get_ringparam,
13326 .set_ringparam = tg3_set_ringparam,
13327 .get_pauseparam = tg3_get_pauseparam,
13328 .set_pauseparam = tg3_set_pauseparam,
13329 .self_test = tg3_self_test,
13330 .get_strings = tg3_get_strings,
13331 .set_phys_id = tg3_set_phys_id,
13332 .get_ethtool_stats = tg3_get_ethtool_stats,
13333 .get_coalesce = tg3_get_coalesce,
13334 .set_coalesce = tg3_set_coalesce,
13335 .get_sset_count = tg3_get_sset_count,
13336 .get_rxnfc = tg3_get_rxnfc,
13337 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13338 .get_rxfh_indir = tg3_get_rxfh_indir,
13339 .set_rxfh_indir = tg3_set_rxfh_indir,
13340 .get_channels = tg3_get_channels,
13341 .set_channels = tg3_set_channels,
13342 .get_ts_info = tg3_get_ts_info,
/*
 * tg3_get_stats64() - ndo_get_stats64 hook.
 *
 * Returns the last snapshot (tp->net_stats_prev) when the hardware
 * stats block is gone (e.g. device down); otherwise fills @stats from
 * the live hardware counters via tg3_get_nstats().  tp->lock guards
 * the access to tp->hw_stats.
 */
13345 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13346 struct rtnl_link_stats64 *stats)
13348 struct tg3 *tp = netdev_priv(dev);
13350 spin_lock_bh(&tp->lock);
13351 if (!tp->hw_stats) {
13352 spin_unlock_bh(&tp->lock);
13353 return &tp->net_stats_prev;
13356 tg3_get_nstats(tp, stats);
13357 spin_unlock_bh(&tp->lock);
/*
 * tg3_set_rx_mode() - ndo_set_rx_mode hook.
 * No-op while the interface is down; otherwise reprograms the RX
 * filters via __tg3_set_rx_mode() under the full driver lock.
 */
13362 static void tg3_set_rx_mode(struct net_device *dev)
13364 struct tg3 *tp = netdev_priv(dev);
13366 if (!netif_running(dev))
13369 tg3_full_lock(tp, 0);
13370 __tg3_set_rx_mode(dev);
13371 tg3_full_unlock(tp);
/*
 * tg3_set_mtu() - record a new MTU and toggle jumbo-frame state.
 *
 * For MTUs above ETH_DATA_LEN the jumbo RX ring is enabled; on
 * 5780-class chips TSO capability is also dropped (and features are
 * re-evaluated) because jumbo + TSO cannot coexist there.  The inverse
 * is done when shrinking back to a standard MTU.
 */
13374 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13377 dev->mtu = new_mtu;
13379 if (new_mtu > ETH_DATA_LEN) {
13380 if (tg3_flag(tp, 5780_CLASS)) {
13381 netdev_update_features(dev);
13382 tg3_flag_clear(tp, TSO_CAPABLE);
13384 tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Shrinking the MTU: restore TSO on 5780-class, disable jumbo ring. */
13387 if (tg3_flag(tp, 5780_CLASS)) {
13388 tg3_flag_set(tp, TSO_CAPABLE);
13389 netdev_update_features(dev);
13391 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/*
 * tg3_change_mtu() - ndo_change_mtu hook.
 *
 * Validates the new MTU against chip limits.  If the device is down
 * the value is simply recorded; otherwise the NIC is stopped, halted,
 * reconfigured for the new MTU and restarted.  A PHY reset is forced
 * on 57766 to work around a read-DMA engine mode issue (see comment
 * below).
 */
13395 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13397 struct tg3 *tp = netdev_priv(dev);
13398 int err, reset_phy = 0;
13400 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13403 if (!netif_running(dev)) {
13404 /* We'll just catch it later when the
13407 tg3_set_mtu(dev, tp, new_mtu);
/* Device is up: stop traffic, halt the chip, apply, restart. */
13413 tg3_netif_stop(tp);
13415 tg3_full_lock(tp, 1);
13417 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13419 tg3_set_mtu(dev, tp, new_mtu);
13421 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13422 * breaks all requests to 256 bytes.
13424 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13427 err = tg3_restart_hw(tp, reset_phy);
13430 tg3_netif_start(tp);
13432 tg3_full_unlock(tp);
/*
 * Network device operations table: standard open/stop/xmit plus the
 * tg3-specific stats, RX-mode, ioctl, MTU and feature hooks defined
 * above.  ndo_poll_controller is only present with netpoll support.
 */
13440 static const struct net_device_ops tg3_netdev_ops = {
13441 .ndo_open = tg3_open,
13442 .ndo_stop = tg3_close,
13443 .ndo_start_xmit = tg3_start_xmit,
13444 .ndo_get_stats64 = tg3_get_stats64,
13445 .ndo_validate_addr = eth_validate_addr,
13446 .ndo_set_rx_mode = tg3_set_rx_mode,
13447 .ndo_set_mac_address = tg3_set_mac_addr,
13448 .ndo_do_ioctl = tg3_ioctl,
13449 .ndo_tx_timeout = tg3_tx_timeout,
13450 .ndo_change_mtu = tg3_change_mtu,
13451 .ndo_fix_features = tg3_fix_features,
13452 .ndo_set_features = tg3_set_features,
13453 #ifdef CONFIG_NET_POLL_CONTROLLER
13454 .ndo_poll_controller = tg3_poll_controller,
/*
 * tg3_get_eeprom_size() - size a raw EEPROM part.
 *
 * Starts with the default EEPROM_CHIP_SIZE, checks for a recognized
 * magic signature at offset 0, and then probes increasing offsets
 * until the signature re-appears (address wrap-around), which gives
 * the true chip size.
 *
 * NOTE(review): the probe loop body is partially missing in this
 * extraction (the offset-doubling/compare statements); verify against
 * upstream tg3.c.
 */
13458 static void tg3_get_eeprom_size(struct tg3 *tp)
13460 u32 cursize, val, magic;
13462 tp->nvram_size = EEPROM_CHIP_SIZE;
13464 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Bail out unless the part carries one of the known magic values. */
13467 if ((magic != TG3_EEPROM_MAGIC) &&
13468 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13469 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13473 * Size the chip by reading offsets at increasing powers of two.
13474 * When we encounter our validation signature, we know the addressing
13475 * has wrapped around, and thus have our chip size.
13479 while (cursize < tp->nvram_size) {
13480 if (tg3_nvram_read(tp, cursize, &val) != 0)
13489 tp->nvram_size = cursize;
/*
 * tg3_get_nvram_size() - determine the NVRAM size.
 *
 * Selfboot images (no TG3_EEPROM_MAGIC at offset 0) are sized with
 * tg3_get_eeprom_size().  Otherwise the size is read from the image
 * itself at offset 0xf0 (16-bit KB count at 0xf2, byteswapped -- see
 * the long comment below), falling back to 512KB.
 */
13492 static void tg3_get_nvram_size(struct tg3 *tp)
13496 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13499 /* Selfboot format */
13500 if (val != TG3_EEPROM_MAGIC) {
13501 tg3_get_eeprom_size(tp);
13505 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13507 /* This is confusing. We want to operate on the
13508 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13509 * call will read from NVRAM and byteswap the data
13510 * according to the byteswapping settings for all
13511 * other register accesses. This ensures the data we
13512 * want will always reside in the lower 16-bits.
13513 * However, the data in NVRAM is in LE format, which
13514 * means the data from the NVRAM read will always be
13515 * opposite the endianness of the CPU. The 16-bit
13516 * byteswap then brings the data to CPU endianness.
13518 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
/* Fallback when the size word could not be read. */
13522 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/*
 * tg3_get_nvram_info() - decode NVRAM_CFG1 for legacy (pre-5752)
 * chips.
 *
 * Sets the FLASH flag when a flash interface is strapped, then, for
 * 5750/5780-class parts, maps the vendor field to a JEDEC id, page
 * size and buffered/unbuffered mode.  Unknown vendors default to the
 * buffered Atmel AT45DB0X1B profile.
 *
 * NOTE(review): break statements between the switch cases are missing
 * in this extraction; verify against upstream tg3.c.
 */
13525 static void tg3_get_nvram_info(struct tg3 *tp)
13529 nvcfg1 = tr32(NVRAM_CFG1);
13530 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13531 tg3_flag_set(tp, FLASH);
/* No flash interface: disable compatibility bypass mode. */
13533 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13534 tw32(NVRAM_CFG1, nvcfg1);
13537 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13538 tg3_flag(tp, 5780_CLASS)) {
13539 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13540 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13541 tp->nvram_jedecnum = JEDEC_ATMEL;
13542 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13543 tg3_flag_set(tp, NVRAM_BUFFERED);
13545 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13546 tp->nvram_jedecnum = JEDEC_ATMEL;
13547 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13549 case FLASH_VENDOR_ATMEL_EEPROM:
13550 tp->nvram_jedecnum = JEDEC_ATMEL;
13551 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13552 tg3_flag_set(tp, NVRAM_BUFFERED);
13554 case FLASH_VENDOR_ST:
13555 tp->nvram_jedecnum = JEDEC_ST;
13556 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13557 tg3_flag_set(tp, NVRAM_BUFFERED);
13559 case FLASH_VENDOR_SAIFUN:
13560 tp->nvram_jedecnum = JEDEC_SAIFUN;
13561 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13563 case FLASH_VENDOR_SST_SMALL:
13564 case FLASH_VENDOR_SST_LARGE:
13565 tp->nvram_jedecnum = JEDEC_SST;
13566 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default profile for chips/vendors not matched above. */
13570 tp->nvram_jedecnum = JEDEC_ATMEL;
13571 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13572 tg3_flag_set(tp, NVRAM_BUFFERED);
/*
 * tg3_nvram_get_pagesize() - translate the 5752-style page-size field
 * of NVRAM_CFG1 into a byte count stored in tp->nvram_pagesize.
 * 264/528 are the Atmel DataFlash "power-of-two + 8" page sizes.
 */
13576 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13578 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13579 case FLASH_5752PAGE_SIZE_256:
13580 tp->nvram_pagesize = 256;
13582 case FLASH_5752PAGE_SIZE_512:
13583 tp->nvram_pagesize = 512;
13585 case FLASH_5752PAGE_SIZE_1K:
13586 tp->nvram_pagesize = 1024;
13588 case FLASH_5752PAGE_SIZE_2K:
13589 tp->nvram_pagesize = 2048;
13591 case FLASH_5752PAGE_SIZE_4K:
13592 tp->nvram_pagesize = 4096;
13594 case FLASH_5752PAGE_SIZE_264:
13595 tp->nvram_pagesize = 264;
13597 case FLASH_5752PAGE_SIZE_528:
13598 tp->nvram_pagesize = 528;
/*
 * tg3_get_5752_nvram_info() - decode NVRAM_CFG1 for 5752 chips.
 *
 * Flags TPM-protected NVRAM, maps the vendor field to JEDEC id /
 * buffered / flash flags, then either derives the flash page size
 * from CFG1 or, for EEPROM parts, uses the maximum EEPROM size as the
 * "page" size and disables compatibility bypass.
 */
13603 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13607 nvcfg1 = tr32(NVRAM_CFG1);
13609 /* NVRAM protection for TPM */
13610 if (nvcfg1 & (1 << 27))
13611 tg3_flag_set(tp, PROTECTED_NVRAM);
13613 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13614 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13615 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13616 tp->nvram_jedecnum = JEDEC_ATMEL;
13617 tg3_flag_set(tp, NVRAM_BUFFERED);
13619 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13620 tp->nvram_jedecnum = JEDEC_ATMEL;
13621 tg3_flag_set(tp, NVRAM_BUFFERED);
13622 tg3_flag_set(tp, FLASH);
13624 case FLASH_5752VENDOR_ST_M45PE10:
13625 case FLASH_5752VENDOR_ST_M45PE20:
13626 case FLASH_5752VENDOR_ST_M45PE40:
13627 tp->nvram_jedecnum = JEDEC_ST;
13628 tg3_flag_set(tp, NVRAM_BUFFERED);
13629 tg3_flag_set(tp, FLASH);
13633 if (tg3_flag(tp, FLASH)) {
13634 tg3_nvram_get_pagesize(tp, nvcfg1);
13636 /* For eeprom, set pagesize to maximum eeprom size */
13637 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13639 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13640 tw32(NVRAM_CFG1, nvcfg1);
/*
 * tg3_get_5755_nvram_info() - decode NVRAM_CFG1 for 5755 chips.
 *
 * Flags TPM-protected NVRAM, then maps the vendor id to JEDEC id,
 * page size and total NVRAM size.  When the part is TPM-protected the
 * usable size is reduced (the 0x3e200/0x1f200 values and the smaller
 * TG3_NVRAM_SIZE_* alternatives).
 */
13644 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13646 u32 nvcfg1, protect = 0;
13648 nvcfg1 = tr32(NVRAM_CFG1);
13650 /* NVRAM protection for TPM */
13651 if (nvcfg1 & (1 << 27)) {
13652 tg3_flag_set(tp, PROTECTED_NVRAM);
13656 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13658 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13659 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13660 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13661 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13662 tp->nvram_jedecnum = JEDEC_ATMEL;
13663 tg3_flag_set(tp, NVRAM_BUFFERED);
13664 tg3_flag_set(tp, FLASH);
13665 tp->nvram_pagesize = 264;
13666 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13667 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13668 tp->nvram_size = (protect ? 0x3e200 :
13669 TG3_NVRAM_SIZE_512KB);
13670 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13671 tp->nvram_size = (protect ? 0x1f200 :
13672 TG3_NVRAM_SIZE_256KB);
13674 tp->nvram_size = (protect ? 0x1f200 :
13675 TG3_NVRAM_SIZE_128KB);
13677 case FLASH_5752VENDOR_ST_M45PE10:
13678 case FLASH_5752VENDOR_ST_M45PE20:
13679 case FLASH_5752VENDOR_ST_M45PE40:
13680 tp->nvram_jedecnum = JEDEC_ST;
13681 tg3_flag_set(tp, NVRAM_BUFFERED);
13682 tg3_flag_set(tp, FLASH);
13683 tp->nvram_pagesize = 256;
13684 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13685 tp->nvram_size = (protect ?
13686 TG3_NVRAM_SIZE_64KB :
13687 TG3_NVRAM_SIZE_128KB);
13688 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13689 tp->nvram_size = (protect ?
13690 TG3_NVRAM_SIZE_64KB :
13691 TG3_NVRAM_SIZE_256KB);
13693 tp->nvram_size = (protect ?
13694 TG3_NVRAM_SIZE_128KB :
13695 TG3_NVRAM_SIZE_512KB);
/*
 * tg3_get_5787_nvram_info() - decode NVRAM_CFG1 for 5787-family
 * chips (also used for 5784/5785): maps the vendor field to JEDEC id,
 * buffered/flash flags and page size.  EEPROM variants additionally
 * clear the compatibility-bypass bit.
 */
13700 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13704 nvcfg1 = tr32(NVRAM_CFG1);
13706 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13707 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13708 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13709 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13710 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13711 tp->nvram_jedecnum = JEDEC_ATMEL;
13712 tg3_flag_set(tp, NVRAM_BUFFERED);
13713 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13715 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13716 tw32(NVRAM_CFG1, nvcfg1);
13718 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13719 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13720 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13721 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13722 tp->nvram_jedecnum = JEDEC_ATMEL;
13723 tg3_flag_set(tp, NVRAM_BUFFERED);
13724 tg3_flag_set(tp, FLASH);
13725 tp->nvram_pagesize = 264;
13727 case FLASH_5752VENDOR_ST_M45PE10:
13728 case FLASH_5752VENDOR_ST_M45PE20:
13729 case FLASH_5752VENDOR_ST_M45PE40:
13730 tp->nvram_jedecnum = JEDEC_ST;
13731 tg3_flag_set(tp, NVRAM_BUFFERED);
13732 tg3_flag_set(tp, FLASH);
13733 tp->nvram_pagesize = 256;
/*
 * tg3_get_5761_nvram_info() - decode NVRAM_CFG1 for 5761 chips.
 *
 * Flags TPM-protected NVRAM, maps the vendor field to JEDEC id and
 * flags (Atmel parts also disable NVRAM address translation), then
 * determines the total size, first from the NVRAM_ADDR_LOCKOUT
 * register and otherwise from the specific part id (2MB/1MB/512KB/
 * 256KB groups below).
 */
13738 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13740 u32 nvcfg1, protect = 0;
13742 nvcfg1 = tr32(NVRAM_CFG1);
13744 /* NVRAM protection for TPM */
13745 if (nvcfg1 & (1 << 27)) {
13746 tg3_flag_set(tp, PROTECTED_NVRAM);
13750 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13752 case FLASH_5761VENDOR_ATMEL_ADB021D:
13753 case FLASH_5761VENDOR_ATMEL_ADB041D:
13754 case FLASH_5761VENDOR_ATMEL_ADB081D:
13755 case FLASH_5761VENDOR_ATMEL_ADB161D:
13756 case FLASH_5761VENDOR_ATMEL_MDB021D:
13757 case FLASH_5761VENDOR_ATMEL_MDB041D:
13758 case FLASH_5761VENDOR_ATMEL_MDB081D:
13759 case FLASH_5761VENDOR_ATMEL_MDB161D:
13760 tp->nvram_jedecnum = JEDEC_ATMEL;
13761 tg3_flag_set(tp, NVRAM_BUFFERED);
13762 tg3_flag_set(tp, FLASH);
13763 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13764 tp->nvram_pagesize = 256;
13766 case FLASH_5761VENDOR_ST_A_M45PE20:
13767 case FLASH_5761VENDOR_ST_A_M45PE40:
13768 case FLASH_5761VENDOR_ST_A_M45PE80:
13769 case FLASH_5761VENDOR_ST_A_M45PE16:
13770 case FLASH_5761VENDOR_ST_M_M45PE20:
13771 case FLASH_5761VENDOR_ST_M_M45PE40:
13772 case FLASH_5761VENDOR_ST_M_M45PE80:
13773 case FLASH_5761VENDOR_ST_M_M45PE16:
13774 tp->nvram_jedecnum = JEDEC_ST;
13775 tg3_flag_set(tp, NVRAM_BUFFERED);
13776 tg3_flag_set(tp, FLASH);
13777 tp->nvram_pagesize = 256;
/* Prefer the size programmed into the lockout register, if any. */
13782 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13785 case FLASH_5761VENDOR_ATMEL_ADB161D:
13786 case FLASH_5761VENDOR_ATMEL_MDB161D:
13787 case FLASH_5761VENDOR_ST_A_M45PE16:
13788 case FLASH_5761VENDOR_ST_M_M45PE16:
13789 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13791 case FLASH_5761VENDOR_ATMEL_ADB081D:
13792 case FLASH_5761VENDOR_ATMEL_MDB081D:
13793 case FLASH_5761VENDOR_ST_A_M45PE80:
13794 case FLASH_5761VENDOR_ST_M_M45PE80:
13795 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13797 case FLASH_5761VENDOR_ATMEL_ADB041D:
13798 case FLASH_5761VENDOR_ATMEL_MDB041D:
13799 case FLASH_5761VENDOR_ST_A_M45PE40:
13800 case FLASH_5761VENDOR_ST_M_M45PE40:
13801 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13803 case FLASH_5761VENDOR_ATMEL_ADB021D:
13804 case FLASH_5761VENDOR_ATMEL_MDB021D:
13805 case FLASH_5761VENDOR_ST_A_M45PE20:
13806 case FLASH_5761VENDOR_ST_M_M45PE20:
13807 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/*
 * tg3_get_5906_nvram_info() - the 5906 always uses a buffered Atmel
 * AT24C512-style EEPROM; no CFG1 decoding is needed.
 */
13813 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13815 tp->nvram_jedecnum = JEDEC_ATMEL;
13816 tg3_flag_set(tp, NVRAM_BUFFERED);
13817 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/*
 * tg3_get_57780_nvram_info() - decode NVRAM_CFG1 for 57780-family
 * (and 57765-class) chips.
 *
 * EEPROM parts get the AT24C512 profile with compat-bypass cleared;
 * Atmel and ST flash parts get JEDEC id, flags, a per-part size from
 * the nested vendor switch, then the page size from CFG1.  Address
 * translation is disabled unless the page size is a DataFlash
 * 264/528-byte page.  Unknown vendors mark the chip NO_NVRAM.
 */
13820 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13824 nvcfg1 = tr32(NVRAM_CFG1);
13826 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13827 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13828 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13829 tp->nvram_jedecnum = JEDEC_ATMEL;
13830 tg3_flag_set(tp, NVRAM_BUFFERED);
13831 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13833 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13834 tw32(NVRAM_CFG1, nvcfg1);
13836 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13837 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13838 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13839 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13840 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13841 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13842 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13843 tp->nvram_jedecnum = JEDEC_ATMEL;
13844 tg3_flag_set(tp, NVRAM_BUFFERED);
13845 tg3_flag_set(tp, FLASH);
13847 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13848 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13849 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13850 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13851 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13853 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13854 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13855 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13857 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13858 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13859 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13863 case FLASH_5752VENDOR_ST_M45PE10:
13864 case FLASH_5752VENDOR_ST_M45PE20:
13865 case FLASH_5752VENDOR_ST_M45PE40:
13866 tp->nvram_jedecnum = JEDEC_ST;
13867 tg3_flag_set(tp, NVRAM_BUFFERED);
13868 tg3_flag_set(tp, FLASH);
13870 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13871 case FLASH_5752VENDOR_ST_M45PE10:
13872 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13874 case FLASH_5752VENDOR_ST_M45PE20:
13875 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13877 case FLASH_5752VENDOR_ST_M45PE40:
13878 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unrecognized vendor: treat the chip as having no NVRAM. */
13883 tg3_flag_set(tp, NO_NVRAM);
13887 tg3_nvram_get_pagesize(tp, nvcfg1);
13888 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13889 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/*
 * tg3_get_5717_nvram_info() - decode NVRAM_CFG1 for 5717/5719 chips.
 *
 * Same structure as the 57780 variant: EEPROM profile with
 * compat-bypass cleared, or Atmel/ST flash profiles with per-part
 * sizes (some parts leave tp->nvram_size at 0 so the size is detected
 * later -- see the "Detect size" comments).  Unknown vendors are
 * marked NO_NVRAM.
 */
13893 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13897 nvcfg1 = tr32(NVRAM_CFG1);
13899 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13900 case FLASH_5717VENDOR_ATMEL_EEPROM:
13901 case FLASH_5717VENDOR_MICRO_EEPROM:
13902 tp->nvram_jedecnum = JEDEC_ATMEL;
13903 tg3_flag_set(tp, NVRAM_BUFFERED);
13904 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13906 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13907 tw32(NVRAM_CFG1, nvcfg1);
13909 case FLASH_5717VENDOR_ATMEL_MDB011D:
13910 case FLASH_5717VENDOR_ATMEL_ADB011B:
13911 case FLASH_5717VENDOR_ATMEL_ADB011D:
13912 case FLASH_5717VENDOR_ATMEL_MDB021D:
13913 case FLASH_5717VENDOR_ATMEL_ADB021B:
13914 case FLASH_5717VENDOR_ATMEL_ADB021D:
13915 case FLASH_5717VENDOR_ATMEL_45USPT:
13916 tp->nvram_jedecnum = JEDEC_ATMEL;
13917 tg3_flag_set(tp, NVRAM_BUFFERED);
13918 tg3_flag_set(tp, FLASH);
13920 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13921 case FLASH_5717VENDOR_ATMEL_MDB021D:
13922 /* Detect size with tg3_nvram_get_size() */
13924 case FLASH_5717VENDOR_ATMEL_ADB021B:
13925 case FLASH_5717VENDOR_ATMEL_ADB021D:
13926 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13929 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13933 case FLASH_5717VENDOR_ST_M_M25PE10:
13934 case FLASH_5717VENDOR_ST_A_M25PE10:
13935 case FLASH_5717VENDOR_ST_M_M45PE10:
13936 case FLASH_5717VENDOR_ST_A_M45PE10:
13937 case FLASH_5717VENDOR_ST_M_M25PE20:
13938 case FLASH_5717VENDOR_ST_A_M25PE20:
13939 case FLASH_5717VENDOR_ST_M_M45PE20:
13940 case FLASH_5717VENDOR_ST_A_M45PE20:
13941 case FLASH_5717VENDOR_ST_25USPT:
13942 case FLASH_5717VENDOR_ST_45USPT:
13943 tp->nvram_jedecnum = JEDEC_ST;
13944 tg3_flag_set(tp, NVRAM_BUFFERED);
13945 tg3_flag_set(tp, FLASH);
13947 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13948 case FLASH_5717VENDOR_ST_M_M25PE20:
13949 case FLASH_5717VENDOR_ST_M_M45PE20:
13950 /* Detect size with tg3_nvram_get_size() */
13952 case FLASH_5717VENDOR_ST_A_M25PE20:
13953 case FLASH_5717VENDOR_ST_A_M45PE20:
13954 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13957 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor: treat the chip as having no NVRAM. */
13962 tg3_flag_set(tp, NO_NVRAM);
13966 tg3_nvram_get_pagesize(tp, nvcfg1);
13967 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13968 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/*
 * tg3_get_5720_nvram_info() - decode NVRAM_CFG1 for 5720/5762 chips.
 *
 * On 5762 the vendor field is first validated (NO_NVRAM when absent)
 * and its EEPROM encodings are remapped onto the 5720 ones.  The main
 * switch then maps EEPROM / Atmel flash / ST flash part ids onto
 * JEDEC id, flags, page size and total size.  Finally, 5762 parts
 * are double-checked by reading the magic word at offset 0; a bad
 * magic also forces NO_NVRAM.
 */
13971 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13973 u32 nvcfg1, nvmpinstrp;
13975 nvcfg1 = tr32(NVRAM_CFG1);
13976 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13978 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13979 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13980 tg3_flag_set(tp, NO_NVRAM);
/* Remap the 5762 EEPROM encodings onto their 5720 equivalents. */
13984 switch (nvmpinstrp) {
13985 case FLASH_5762_EEPROM_HD:
13986 nvmpinstrp = FLASH_5720_EEPROM_HD;
13988 case FLASH_5762_EEPROM_LD:
13989 nvmpinstrp = FLASH_5720_EEPROM_LD;
13994 switch (nvmpinstrp) {
13995 case FLASH_5720_EEPROM_HD:
13996 case FLASH_5720_EEPROM_LD:
13997 tp->nvram_jedecnum = JEDEC_ATMEL;
13998 tg3_flag_set(tp, NVRAM_BUFFERED);
14000 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14001 tw32(NVRAM_CFG1, nvcfg1);
14002 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14003 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14005 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14007 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14008 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14009 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14010 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14011 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14012 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14013 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14014 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14015 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14016 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14017 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14018 case FLASH_5720VENDOR_ATMEL_45USPT:
14019 tp->nvram_jedecnum = JEDEC_ATMEL;
14020 tg3_flag_set(tp, NVRAM_BUFFERED);
14021 tg3_flag_set(tp, FLASH);
14023 switch (nvmpinstrp) {
14024 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14025 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14026 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14027 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14029 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14030 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14031 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14032 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14034 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14035 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14036 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
/* On 5762 the size is left for later detection; others get 128KB. */
14039 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14040 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14044 case FLASH_5720VENDOR_M_ST_M25PE10:
14045 case FLASH_5720VENDOR_M_ST_M45PE10:
14046 case FLASH_5720VENDOR_A_ST_M25PE10:
14047 case FLASH_5720VENDOR_A_ST_M45PE10:
14048 case FLASH_5720VENDOR_M_ST_M25PE20:
14049 case FLASH_5720VENDOR_M_ST_M45PE20:
14050 case FLASH_5720VENDOR_A_ST_M25PE20:
14051 case FLASH_5720VENDOR_A_ST_M45PE20:
14052 case FLASH_5720VENDOR_M_ST_M25PE40:
14053 case FLASH_5720VENDOR_M_ST_M45PE40:
14054 case FLASH_5720VENDOR_A_ST_M25PE40:
14055 case FLASH_5720VENDOR_A_ST_M45PE40:
14056 case FLASH_5720VENDOR_M_ST_M25PE80:
14057 case FLASH_5720VENDOR_M_ST_M45PE80:
14058 case FLASH_5720VENDOR_A_ST_M25PE80:
14059 case FLASH_5720VENDOR_A_ST_M45PE80:
14060 case FLASH_5720VENDOR_ST_25USPT:
14061 case FLASH_5720VENDOR_ST_45USPT:
14062 tp->nvram_jedecnum = JEDEC_ST;
14063 tg3_flag_set(tp, NVRAM_BUFFERED);
14064 tg3_flag_set(tp, FLASH);
14066 switch (nvmpinstrp) {
14067 case FLASH_5720VENDOR_M_ST_M25PE20:
14068 case FLASH_5720VENDOR_M_ST_M45PE20:
14069 case FLASH_5720VENDOR_A_ST_M25PE20:
14070 case FLASH_5720VENDOR_A_ST_M45PE20:
14071 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14073 case FLASH_5720VENDOR_M_ST_M25PE40:
14074 case FLASH_5720VENDOR_M_ST_M45PE40:
14075 case FLASH_5720VENDOR_A_ST_M25PE40:
14076 case FLASH_5720VENDOR_A_ST_M45PE40:
14077 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14079 case FLASH_5720VENDOR_M_ST_M25PE80:
14080 case FLASH_5720VENDOR_M_ST_M45PE80:
14081 case FLASH_5720VENDOR_A_ST_M25PE80:
14082 case FLASH_5720VENDOR_A_ST_M45PE80:
14083 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14086 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14087 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor: treat the chip as having no NVRAM. */
14092 tg3_flag_set(tp, NO_NVRAM);
14096 tg3_nvram_get_pagesize(tp, nvcfg1);
14097 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14098 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5762: sanity-check the magic word; bad magic means no usable NVRAM. */
14100 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14103 if (tg3_nvram_read(tp, 0, &val))
14106 if (val != TG3_EEPROM_MAGIC &&
14107 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14108 tg3_flag_set(tp, NO_NVRAM);
14112 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/*
 * tg3_nvram_init() - probe and initialize NVRAM access.
 *
 * SSB GigE cores have no NVRAM/EEPROM at all.  Otherwise the EEPROM
 * state machine is reset and seeprom access enabled; on chips newer
 * than 5700/5701 the NVRAM flag is set and the per-ASIC decode
 * routine above is dispatched (under the NVRAM lock with access
 * enabled), followed by size detection if the decoder left
 * tp->nvram_size at 0.  5700/5701 fall back to plain EEPROM sizing.
 */
14113 static void tg3_nvram_init(struct tg3 *tp)
14115 if (tg3_flag(tp, IS_SSB_CORE)) {
14116 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14117 tg3_flag_clear(tp, NVRAM);
14118 tg3_flag_clear(tp, NVRAM_BUFFERED);
14119 tg3_flag_set(tp, NO_NVRAM);
/* Reset the EEPROM state machine and program the clock period. */
14123 tw32_f(GRC_EEPROM_ADDR,
14124 (EEPROM_ADDR_FSM_RESET |
14125 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14126 EEPROM_ADDR_CLKPERD_SHIFT)));
14130 /* Enable seeprom accesses. */
14131 tw32_f(GRC_LOCAL_CTRL,
14132 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14135 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14136 tg3_asic_rev(tp) != ASIC_REV_5701) {
14137 tg3_flag_set(tp, NVRAM);
14139 if (tg3_nvram_lock(tp)) {
14140 netdev_warn(tp->dev,
14141 "Cannot get nvram lock, %s failed\n",
14145 tg3_enable_nvram_access(tp);
14147 tp->nvram_size = 0;
/* Dispatch to the matching per-ASIC NVRAM_CFG1 decoder. */
14149 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14150 tg3_get_5752_nvram_info(tp);
14151 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14152 tg3_get_5755_nvram_info(tp);
14153 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14154 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14155 tg3_asic_rev(tp) == ASIC_REV_5785)
14156 tg3_get_5787_nvram_info(tp);
14157 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14158 tg3_get_5761_nvram_info(tp);
14159 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14160 tg3_get_5906_nvram_info(tp);
14161 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14162 tg3_flag(tp, 57765_CLASS))
14163 tg3_get_57780_nvram_info(tp);
14164 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14165 tg3_asic_rev(tp) == ASIC_REV_5719)
14166 tg3_get_5717_nvram_info(tp);
14167 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14168 tg3_asic_rev(tp) == ASIC_REV_5762)
14169 tg3_get_5720_nvram_info(tp);
14171 tg3_get_nvram_info(tp);
/* Decoder left the size unknown: probe it. */
14173 if (tp->nvram_size == 0)
14174 tg3_get_nvram_size(tp);
14176 tg3_disable_nvram_access(tp);
14177 tg3_nvram_unlock(tp);
/* 5700/5701 path: no NVRAM interface, size the EEPROM directly. */
14180 tg3_flag_clear(tp, NVRAM);
14181 tg3_flag_clear(tp, NVRAM_BUFFERED);
14183 tg3_get_eeprom_size(tp);
/* Maps a PCI subsystem (vendor, device) pair to a known PHY id.
 * NOTE(review): the phy_id member and closing brace are not visible
 * in this extraction; verify against upstream tg3.c. */
14187 struct subsys_tbl_ent {
14188 u16 subsys_vendor, subsys_devid;
/*
 * Table of known board subsystem ids (Broadcom, 3Com, Dell, Compaq,
 * IBM) and the PHY each board carries.  A phy id of 0 marks boards
 * whose PHY cannot be identified from the subsystem id alone.
 * Searched linearly by tg3_lookup_by_subsys().
 */
14192 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14193 /* Broadcom boards. */
14194 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14195 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14196 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14197 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14198 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14199 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14200 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14201 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14202 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14203 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14204 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14205 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14206 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14207 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14208 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14209 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14210 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14211 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14212 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14213 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14214 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14215 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3Com boards. */
14218 { TG3PCI_SUBVENDOR_ID_3COM,
14219 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14220 { TG3PCI_SUBVENDOR_ID_3COM,
14221 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14222 { TG3PCI_SUBVENDOR_ID_3COM,
14223 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14224 { TG3PCI_SUBVENDOR_ID_3COM,
14225 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14226 { TG3PCI_SUBVENDOR_ID_3COM,
14227 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* Dell boards. */
14230 { TG3PCI_SUBVENDOR_ID_DELL,
14231 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14232 { TG3PCI_SUBVENDOR_ID_DELL,
14233 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14234 { TG3PCI_SUBVENDOR_ID_DELL,
14235 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14236 { TG3PCI_SUBVENDOR_ID_DELL,
14237 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14239 /* Compaq boards. */
14240 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14241 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14242 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14243 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14244 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14245 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14246 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14247 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14248 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14249 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
14252 { TG3PCI_SUBVENDOR_ID_IBM,
14253 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/*
 * tg3_lookup_by_subsys() - linear search of subsys_id_to_phy_id for
 * an entry matching this device's PCI subsystem vendor/device pair.
 * Returns the matching entry; the no-match return value is not
 * visible in this extraction (presumably NULL -- verify upstream).
 */
14256 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14260 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14261 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14262 tp->pdev->subsystem_vendor) &&
14263 (subsys_id_to_phy_id[i].subsys_devid ==
14264 tp->pdev->subsystem_device))
14265 return &subsys_id_to_phy_id[i];
/* Read the hardware configuration the bootcode left in NVRAM shadow
 * memory (NIC_SRAM_DATA_*) — or, on 5906, in the VCPU shadow register —
 * and translate it into driver state: PHY ID, LED control mode, and the
 * EEPROM_WRITE_PROT / WOL / ASF / APE / RGMII feature flags.
 */
14270 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14274 	tp->phy_id = TG3_PHY_ID_INVALID;
14275 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14277 	/* Assume an onboard device and WOL capable by default. */
14278 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14279 	tg3_flag_set(tp, WOL_CAP);
	/* 5906 has no NVRAM config block; LOM/NIC and WOL/ASPM bits come
	 * from the PCIe transaction config and the VCPU shadow register.
	 */
14281 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14282 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14283 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14284 			tg3_flag_set(tp, IS_NIC);
14286 		val = tr32(VCPU_CFGSHDW);
14287 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14288 			tg3_flag_set(tp, ASPM_WORKAROUND);
14289 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14290 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14291 			tg3_flag_set(tp, WOL_ENABLE);
14292 			device_set_wakeup_enable(&tp->pdev->dev, true);
	/* All other chips: only trust the SRAM config block if the bootcode
	 * wrote the magic signature.
	 */
14297 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14298 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14299 		u32 nic_cfg, led_cfg;
14300 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14301 		int eeprom_phy_serdes = 0;
14303 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14304 		tp->nic_sram_data_cfg = nic_cfg;
14306 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14307 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists for newer bootcode versions (0 < ver < 0x100)
		 * on chips other than 5700/5701/5703.
		 */
14308 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14309 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14310 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14311 		    (ver > 0) && (ver < 0x100))
14312 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14314 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14315 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14317 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14318 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14319 			eeprom_phy_serdes = 1;
		/* Reassemble the PHY ID from the two SRAM halves into the
		 * driver's TG3_PHY_ID_* encoding.
		 */
14321 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14322 		if (nic_phy_id != 0) {
14323 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14324 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14326 			eeprom_phy_id  = (id1 >> 16) << 10;
14327 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14328 			eeprom_phy_id |= (id2 & 0x03ff) << 0;
14332 		tp->phy_id = eeprom_phy_id;
14333 		if (eeprom_phy_serdes) {
14334 			if (!tg3_flag(tp, 5705_PLUS))
14335 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14337 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		/* LED mode: 5750+ keeps it (plus Shasta extended modes) in
		 * CFG_2, older chips in the base config word.
		 */
14340 		if (tg3_flag(tp, 5750_PLUS))
14341 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14342 					  SHASTA_EXT_LED_MODE_MASK);
14344 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14348 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14349 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14352 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14353 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14356 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14357 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14359 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14360 			 * read on some older 5700/5701 bootcode.
14362 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14363 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14364 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14368 		case SHASTA_EXT_LED_SHARED:
14369 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 errata: do not add the PHY mode bits. */
14370 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14371 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14372 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14373 						 LED_CTRL_MODE_PHY_2);
14376 		case SHASTA_EXT_LED_MAC:
14377 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14380 		case SHASTA_EXT_LED_COMBO:
14381 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14382 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14383 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14384 						 LED_CTRL_MODE_PHY_2);
		/* Board-specific LED overrides: Dell 5700/5701 boards and
		 * 5784_AX silicon force a particular mode.
		 */
14389 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14390 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14391 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14392 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14394 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14395 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14397 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14398 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Certain Arima boards set the WP bit but still need
			 * write access to the EEPROM.
			 */
14399 			if ((tp->pdev->subsystem_vendor ==
14400 			     PCI_VENDOR_ID_ARIMA) &&
14401 			    (tp->pdev->subsystem_device == 0x205a ||
14402 			     tp->pdev->subsystem_device == 0x2063))
14403 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14405 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14406 			tg3_flag_set(tp, IS_NIC);
14409 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14410 			tg3_flag_set(tp, ENABLE_ASF);
14411 			if (tg3_flag(tp, 5750_PLUS))
14412 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14415 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14416 		    tg3_flag(tp, 5750_PLUS))
14417 			tg3_flag_set(tp, ENABLE_APE);
		/* Fiber/serdes boards only keep WOL if the bootcode says so. */
14419 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14420 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14421 			tg3_flag_clear(tp, WOL_CAP);
14423 		if (tg3_flag(tp, WOL_CAP) &&
14424 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14425 			tg3_flag_set(tp, WOL_ENABLE);
14426 			device_set_wakeup_enable(&tp->pdev->dev, true);
14429 		if (cfg2 & (1 << 17))
14430 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14432 		/* serdes signal pre-emphasis in register 0x590 set by */
14433 		/* bootcode if bit 18 is set */
14434 		if (cfg2 & (1 << 18))
14435 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14437 		if ((tg3_flag(tp, 57765_PLUS) ||
14438 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14439 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14440 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14441 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
		/* ASPM debounce workaround comes from CFG_3 on older PCIe
		 * parts (not 5785, not 57765+).
		 */
14443 		if (tg3_flag(tp, PCI_EXPRESS) &&
14444 		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
14445 		    !tg3_flag(tp, 57765_PLUS)) {
14448 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14449 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14450 				tg3_flag_set(tp, ASPM_WORKAROUND);
14453 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14454 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14455 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14456 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14457 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14458 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	/* Publish the final WOL capability to the PM core. */
14461 	if (tg3_flag(tp, WOL_CAP))
14462 		device_set_wakeup_enable(&tp->pdev->dev,
14463 					 tg3_flag(tp, WOL_ENABLE));
14465 		device_set_wakeup_capable(&tp->pdev->dev, false);
/* Read one 32-bit word from the APE's OTP region via the APE OTP
 * mailbox registers.  @offset is a word offset (converted to a bit
 * address by * 8).  Takes the NVRAM lock around the whole transaction.
 * Result is stored in *@val; returns 0 on success (CMD_DONE seen),
 * otherwise an error — exact failure value is elided in this excerpt.
 */
14468 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14471 	u32 val2, off = offset * 8;
14473 	err = tg3_nvram_lock(tp);
	/* Program the address and kick off a read command, then read the
	 * control register back to post the writes.
	 */
14477 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14478 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14479 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14480 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	/* Poll (bounded at 100 iterations) for command completion. */
14483 	for (i = 0; i < 100; i++) {
14484 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14485 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
14486 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14492 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14494 	tg3_nvram_unlock(tp);
14495 	if (val2 & APE_OTP_STATUS_CMD_DONE)
14501 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14506 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14507 tw32(OTP_CTRL, cmd);
14509 /* Wait for up to 1 ms for command to execute. */
14510 for (i = 0; i < 100; i++) {
14511 val = tr32(OTP_STATUS);
14512 if (val & OTP_STATUS_CMD_DONE)
14517 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14520 /* Read the gphy configuration from the OTP region of the chip. The gphy
14521  * configuration is a 32-bit value that straddles the alignment boundary.
14522  * We do two 32-bit reads and then shift and merge the results.
	 * Returns the merged value; the early-return value on command
	 * failure is elided in this excerpt — presumably 0.
14524 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14526 	u32 bhalf_otp, thalf_otp;
	/* Route OTP access through the GRC register window. */
14528 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14530 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
	/* First (top) half at MAGIC1 ... */
14533 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14535 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14538 	thalf_otp = tr32(OTP_READ_DATA);
	/* ... second (bottom) half at MAGIC2. */
14540 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14542 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14545 	bhalf_otp = tr32(OTP_READ_DATA);
	/* Merge: low 16 bits of the top half become the high 16 bits. */
14547 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to its default state: advertise autoneg
 * plus every speed/duplex the PHY type allows (gigabit unless the PHY
 * is 10/100-only; 10/100 only for copper, FIBRE otherwise), with all
 * current/active link parameters unknown.
 */
14550 static void tg3_phy_init_link_config(struct tg3 *tp)
14552 	u32 adv = ADVERTISED_Autoneg;
14554 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14555 		adv |= ADVERTISED_1000baseT_Half |
14556 		       ADVERTISED_1000baseT_Full;
	/* Copper (non-serdes) PHYs also advertise the 10/100 modes. */
14558 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14559 		adv |= ADVERTISED_100baseT_Half |
14560 		       ADVERTISED_100baseT_Full |
14561 		       ADVERTISED_10baseT_Half |
14562 		       ADVERTISED_10baseT_Full |
14565 		adv |= ADVERTISED_FIBRE;
14567 	tp->link_config.advertising = adv;
14568 	tp->link_config.speed = SPEED_UNKNOWN;
14569 	tp->link_config.duplex = DUPLEX_UNKNOWN;
14570 	tp->link_config.autoneg = AUTONEG_ENABLE;
14571 	tp->link_config.active_speed = SPEED_UNKNOWN;
14572 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
/* Probe and identify the PHY: read its ID over MDIO (unless ASF/APE
 * firmware owns the PHY), falling back to the subsystem-ID table or the
 * EEPROM-provided ID; then set serdes/EEE flags, initialize the default
 * link config, and — when safe — reset the PHY and (re)start autoneg.
 * Returns 0 on success or a negative errno from the PHY helpers.
 */
14577 static int tg3_phy_probe(struct tg3 *tp)
14579 	u32 hw_phy_id_1, hw_phy_id_2;
14580 	u32 hw_phy_id, hw_phy_id_masked;
14583 	/* flow control autonegotiation is default behavior */
14584 	tg3_flag_set(tp, PAUSE_AUTONEG);
14585 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
	/* With APE firmware present, each PCI function uses its own APE
	 * lock to arbitrate PHY access.
	 */
14587 	if (tg3_flag(tp, ENABLE_APE)) {
14588 		switch (tp->pci_fn) {
14590 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14593 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14596 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14599 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14604 	if (tg3_flag(tp, USE_PHYLIB))
14605 		return tg3_phy_init(tp);
14607 	/* Reading the PHY ID register can conflict with ASF
14608 	 * firmware access to the PHY hardware.
14611 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14612 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14614 		/* Now read the physical PHY_ID from the chip and verify
14615 		 * that it is sane. If it doesn't look good, we fall back
14616 		 * to either the hard-coded table based PHY_ID and failing
14617 		 * that the value found in the eeprom area.
14619 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14620 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
		/* Pack PHYSID1/PHYSID2 into the driver's TG3_PHY_ID_* encoding
		 * (same layout tg3_get_eeprom_hw_cfg() builds from SRAM).
		 */
14622 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14623 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14624 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14626 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14629 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14630 		tp->phy_id = hw_phy_id;
14631 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14632 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14634 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14636 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
14637 			/* Do nothing, phy ID already set up in
14638 			 * tg3_get_eeprom_hw_cfg().
14641 			struct subsys_tbl_ent *p;
14643 			/* No eeprom signature? Try the hardcoded
14644 			 * subsys device table.
14646 			p = tg3_lookup_by_subsys(tp);
14648 				tp->phy_id = p->phy_id;
14649 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
14650 				/* For now we saw the IDs 0xbc050cd0,
14651 				 * 0xbc050f80 and 0xbc050c30 on devices
14652 				 * connected to an BCM4785 and there are
14653 				 * probably more. Just assume that the phy is
14654 				 * supported when it is connected to a SSB core
14661 			    tp->phy_id == TG3_PHY_ID_BCM8002)
14662 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
	/* EEE is supported on these copper-PHY ASIC revs (with the listed
	 * A0 steppings excluded).
	 */
14666 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14667 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14668 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
14669 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
14670 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
14671 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14672 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14673 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14674 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14675 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14677 	tg3_phy_init_link_config(tp);
	/* Only touch the PHY (reset + autoneg restart) on copper PHYs that
	 * firmware (ASF/APE) is not managing.
	 */
14679 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14680 	    !tg3_flag(tp, ENABLE_APE) &&
14681 	    !tg3_flag(tp, ENABLE_ASF)) {
		/* BMSR latches link-down; read twice so the second read
		 * reflects the current link state.
		 */
14684 		tg3_readphy(tp, MII_BMSR, &bmsr);
14685 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14686 		    (bmsr & BMSR_LSTATUS))
14687 			goto skip_phy_reset;
14689 		err = tg3_phy_reset(tp);
14693 		tg3_phy_set_wirespeed(tp);
14695 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14696 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14697 					    tp->link_config.flowctrl);
14699 			tg3_writephy(tp, MII_BMCR,
14700 				     BMCR_ANENABLE | BMCR_ANRESTART);
	/* BCM5401 needs its DSP coefficients (re)loaded after reset. */
14705 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14706 		err = tg3_init_5401phy_dsp(tp);
14710 		err = tg3_init_5401phy_dsp(tp);
/* Parse the PCI VPD (Vital Product Data) block: extract the board part
 * number into tp->board_part_number and, on Dell boards (MFR_ID
 * "1028"), seed tp->fw_ver from the VENDOR0 keyword.  When no usable
 * VPD is found, fall back to a part number derived from the PCI device
 * ID, or "none".
 */
14716 static void tg3_read_vpd(struct tg3 *tp)
14719 	unsigned int block_end, rosize, len;
14723 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	/* Locate the read-only VPD section and bound-check it. */
14727 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14729 		goto out_not_found;
14731 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14732 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14733 	i += PCI_VPD_LRDT_TAG_SIZE;
14735 	if (block_end > vpdlen)
14736 		goto out_not_found;
	/* Dell detection: MFR_ID keyword must be exactly "1028". */
14738 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14739 				      PCI_VPD_RO_KEYWORD_MFR_ID);
14741 	len = pci_vpd_info_field_size(&vpd_data[j]);
14743 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
14744 	if (j + len > block_end || len != 4 ||
14745 	    memcmp(&vpd_data[j], "1028", 4))
	/* VENDOR0 carries the firmware version string on these boards. */
14748 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14749 				      PCI_VPD_RO_KEYWORD_VENDOR0);
14753 	len = pci_vpd_info_field_size(&vpd_data[j]);
14755 	j += PCI_VPD_INFO_FLD_HDR_SIZE;
14756 	if (j + len > block_end)
	/* Clamp to the fw_ver buffer; snprintf NUL-terminates. */
14759 	if (len >= sizeof(tp->fw_ver))
14760 		len = sizeof(tp->fw_ver) - 1;
14761 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14762 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
	/* Part number (PN keyword). */
14767 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14768 				      PCI_VPD_RO_KEYWORD_PARTNO);
14770 		goto out_not_found;
14772 	len = pci_vpd_info_field_size(&vpd_data[i]);
14774 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
14775 	if (len > TG3_BPN_SIZE ||
14776 	    (len + i) > vpdlen)
14777 		goto out_not_found;
14779 	memcpy(tp->board_part_number, &vpd_data[i], len);
14783 	if (tp->board_part_number[0])
	/* No VPD part number: synthesize one from the PCI device ID. */
14787 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14788 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14789 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14790 			strcpy(tp->board_part_number, "BCM5717");
14791 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14792 			strcpy(tp->board_part_number, "BCM5718");
14795 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14796 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14797 			strcpy(tp->board_part_number, "BCM57780");
14798 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14799 			strcpy(tp->board_part_number, "BCM57760");
14800 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14801 			strcpy(tp->board_part_number, "BCM57790");
14802 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14803 			strcpy(tp->board_part_number, "BCM57788");
14806 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14807 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14808 			strcpy(tp->board_part_number, "BCM57761");
14809 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14810 			strcpy(tp->board_part_number, "BCM57765");
14811 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14812 			strcpy(tp->board_part_number, "BCM57781");
14813 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14814 			strcpy(tp->board_part_number, "BCM57785");
14815 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14816 			strcpy(tp->board_part_number, "BCM57791");
14817 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14818 			strcpy(tp->board_part_number, "BCM57795");
14821 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14822 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14823 			strcpy(tp->board_part_number, "BCM57762");
14824 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14825 			strcpy(tp->board_part_number, "BCM57766");
14826 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14827 			strcpy(tp->board_part_number, "BCM57782");
14828 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14829 			strcpy(tp->board_part_number, "BCM57786");
14832 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14833 		strcpy(tp->board_part_number, "BCM95906");
14836 		strcpy(tp->board_part_number, "none");
/* Sanity-check a firmware image header in NVRAM at @offset: the first
 * word's top bits must match the 0x0c000000 signature and the second
 * word must be readable.  Return semantics (true/false) are elided in
 * this excerpt — callers treat nonzero as "valid".
 */
14840 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14844 	if (tg3_nvram_read(tp, offset, &val) ||
14845 	    (val & 0xfc000000) != 0x0c000000 ||
14846 	    tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver.  New-style images embed a
 * 16-byte version string (read via a pointer at offset + 8); old-style
 * images store a packed major/minor at TG3_NVM_PTREV_BCVER, formatted
 * as "vM.mm".
 */
14853 static void tg3_read_bc_ver(struct tg3 *tp)
14855 	u32 val, offset, start, ver_offset;
14857 	bool newver = false;
14859 	if (tg3_nvram_read(tp, 0xc, &offset) ||
14860 	    tg3_nvram_read(tp, 0x4, &start))
14863 	offset = tg3_nvram_logical_addr(tp, offset);
14865 	if (tg3_nvram_read(tp, offset, &val))
	/* 0x0c000000 signature marks the new-style versioned image. */
14868 	if ((val & 0xfc000000) == 0x0c000000) {
14869 		if (tg3_nvram_read(tp, offset + 4, &val))
14876 		dst_off = strlen(tp->fw_ver);
		/* Need room for the full 16-byte string in fw_ver. */
14879 		if (TG3_VER_SIZE - dst_off < 16 ||
14880 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
14883 		offset = offset + ver_offset - start;
14884 		for (i = 0; i < 16; i += 4) {
14886 			if (tg3_nvram_read_be32(tp, offset + i, &v))
14889 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
	/* Old-style: packed major/minor word. */
14894 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14897 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14898 			TG3_NVM_BCVER_MAJSFT;
14899 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14900 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14901 			 "v%d.%02d", major, minor);
/* Format the hardware-selfboot version ("sb vM.mm") into tp->fw_ver
 * from the packed major/minor fields of the NVRAM HWSB_CFG1 word.
 */
14905 static void tg3_read_hwsb_ver(struct tg3 *tp)
14907 	u32 val, major, minor;
14909 	/* Use native endian representation */
14910 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14913 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14914 		TG3_NVM_HWSB_CFG1_MAJSFT;
14915 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14916 		TG3_NVM_HWSB_CFG1_MINSFT;
14918 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Append the selfboot (EEPROM format 1) version to tp->fw_ver as
 * "sb vM.mm" plus an optional build letter ('a' + build - 1).  @val is
 * the NVRAM signature word whose revision field selects where the
 * encoded-date/version word lives for that format revision.
 */
14921 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14923 	u32 offset, major, minor, build;
14925 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14927 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
	/* Each format-1 revision keeps its EDH word at a different offset. */
14930 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14931 	case TG3_EEPROM_SB_REVISION_0:
14932 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14934 	case TG3_EEPROM_SB_REVISION_2:
14935 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14937 	case TG3_EEPROM_SB_REVISION_3:
14938 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14940 	case TG3_EEPROM_SB_REVISION_4:
14941 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14943 	case TG3_EEPROM_SB_REVISION_5:
14944 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14946 	case TG3_EEPROM_SB_REVISION_6:
14947 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14953 	if (tg3_nvram_read(tp, offset, &val))
14956 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14957 		TG3_EEPROM_SB_EDH_BLD_SHFT;
14958 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14959 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
14960 	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
	/* Reject clearly bogus values (minor is 2 digits, build maps to
	 * 'a'..'z').
	 */
14962 	if (minor > 99 || build > 26)
14965 	offset = strlen(tp->fw_ver);
14966 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14967 		 " v%d.%02d", major, minor);
	/* Nonzero build number becomes a trailing letter, e.g. "v1.02a". */
14970 	offset = strlen(tp->fw_ver);
14971 	if (offset < TG3_VER_SIZE - 1)
14972 		tp->fw_ver[offset] = 'a' + build - 1;
/* Locate the ASF/management firmware image via the NVRAM directory and
 * append its embedded 16-byte version string to tp->fw_ver, separated
 * by ", ".  Copies are truncated to fit TG3_VER_SIZE.
 */
14976 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14978 	u32 val, offset, start;
	/* Scan the NVRAM directory for the ASF-init entry. */
14981 	for (offset = TG3_NVM_DIR_START;
14982 	     offset < TG3_NVM_DIR_END;
14983 	     offset += TG3_NVM_DIRENT_SIZE) {
14984 		if (tg3_nvram_read(tp, offset, &val))
14987 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14991 	if (offset == TG3_NVM_DIR_END)
	/* Load base is fixed on pre-5705 chips, otherwise read from the
	 * preceding directory word.
	 */
14994 	if (!tg3_flag(tp, 5705_PLUS))
14995 		start = 0x08000000;
14996 	else if (tg3_nvram_read(tp, offset - 4, &start))
14999 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15000 	    !tg3_fw_img_is_valid(tp, offset) ||
15001 	    tg3_nvram_read(tp, offset + 8, &val))
15004 	offset += val - start;
15006 	vlen = strlen(tp->fw_ver);
15008 	tp->fw_ver[vlen++] = ',';
15009 	tp->fw_ver[vlen++] = ' ';
	/* Copy the 16-byte version string in 4-byte big-endian chunks,
	 * truncating at the end of the fw_ver buffer.
	 */
15011 	for (i = 0; i < 4; i++) {
15013 		if (tg3_nvram_read_be32(tp, offset, &v))
15016 		offset += sizeof(v);
15018 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15019 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15023 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Detect NCSI-capable APE firmware: require the APE segment signature
 * and firmware-ready status, then set APE_HAS_NCSI if the firmware
 * advertises the NCSI feature bit.
 */
15028 static void tg3_probe_ncsi(struct tg3 *tp)
15032 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15033 	if (apedata != APE_SEG_SIG_MAGIC)
15036 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15037 	if (!(apedata & APE_FW_STATUS_READY))
15040 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15041 		tg3_flag_set(tp, APE_HAS_NCSI);
/* Append the APE management-firmware version (" <type> vA.B.C.D") to
 * tp->fw_ver.  The firmware-type label depends on the NCSI flag and
 * device ID; the label strings themselves are elided in this excerpt.
 */
15044 static void tg3_read_dash_ver(struct tg3 *tp)
15050 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15052 	if (tg3_flag(tp, APE_HAS_NCSI))
15054 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15059 	vlen = strlen(tp->fw_ver);
15061 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15063 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15064 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15065 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15066 		 (apedata & APE_FW_VERSION_BLDMSK));
/* 5762 only: read a two-word version blob from APE OTP and, if the
 * magic validates, append " .NN" (the last nonzero byte of the 64-bit
 * value) to tp->fw_ver.
 */
15069 static void tg3_read_otp_ver(struct tg3 *tp)
15073 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15076 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15077 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15078 	    TG3_OTP_MAGIC0_VALID(val)) {
15079 		u64 val64 = (u64) val << 32 | val2;
		/* Scan up to 7 bytes for the last nonzero (version) byte. */
15083 		for (i = 0; i < 7; i++) {
15084 			if ((val64 & 0xff) == 0)
15086 			ver = val64 & 0xff;
15089 		vlen = strlen(tp->fw_ver);
15090 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
/* Build the complete tp->fw_ver string, dispatching on the NVRAM magic
 * to the bootcode / selfboot / hw-selfboot readers, then appending the
 * management-firmware version (APE/DASH or ASF).  Skips work when VPD
 * parsing already filled fw_ver.  Always NUL-terminates the buffer.
 */
15094 static void tg3_read_fw_ver(struct tg3 *tp)
15097 	bool vpd_vers = false;
15099 	if (tp->fw_ver[0] != 0)
	/* No NVRAM at all: report selfboot and try the OTP version. */
15102 	if (tg3_flag(tp, NO_NVRAM)) {
15103 		strcat(tp->fw_ver, "sb");
15104 		tg3_read_otp_ver(tp);
15108 	if (tg3_nvram_read(tp, 0, &val))
	/* Word 0 magic selects the image format. */
15111 	if (val == TG3_EEPROM_MAGIC)
15112 		tg3_read_bc_ver(tp);
15113 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15114 		tg3_read_sb_ver(tp, val);
15115 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15116 		tg3_read_hwsb_ver(tp);
15118 	if (tg3_flag(tp, ENABLE_ASF)) {
15119 		if (tg3_flag(tp, ENABLE_APE)) {
15120 			tg3_probe_ncsi(tp);
15122 				tg3_read_dash_ver(tp);
15123 		} else if (!vpd_vers) {
15124 			tg3_read_mgmtfw_ver(tp);
15128 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
/* Return the RX return ring size for this chip: the large-ring size on
 * LRG_PROD_RING_CAP parts, the 5700 size on jumbo-capable non-5780
 * parts, otherwise the 5705 size.
 */
15131 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15133 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15134 		return TG3_RX_RET_MAX_SIZE_5717;
15135 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15136 		return TG3_RX_RET_MAX_SIZE_5700;
15138 	return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to the mailbox registers;
 * matched with pci_dev_present() to enable the write-reorder workaround
 * (read back after every mailbox write).  NOTE(review):
 * DEFINE_PCI_DEVICE_TABLE is deprecated upstream in favor of
 * "static const struct pci_device_id []" — candidate cleanup.
 */
15141 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15142 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15143 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15144 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
/* Find the sibling PCI function of a dual-port device (same slot,
 * different function number).  On single-port configurations the peer
 * is the device itself (handled past this excerpt).
 */
15148 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15150 	struct pci_dev *peer;
15151 	unsigned int func, devnr = tp->pdev->devfn & ~7;
	/* Probe all 8 functions in the slot for a device other than us. */
15153 	for (func = 0; func < 8; func++) {
15154 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15155 		if (peer && peer != tp->pdev)
15159 	/* 5704 can be configured in single-port mode, set peer to
15160 	 * tp->pdev in that case.
15168 	 * We don't need to keep the refcount elevated; there's no way
15169 	 * to remove one half of this device without removing the other
/* Decode the chip revision ID from the misc host control register (or,
 * for newer parts, from the product-ID config registers) into
 * tp->pci_chip_rev_id, then derive the chip-family flags (5705_PLUS,
 * 5750_PLUS, 5755_PLUS, 5780_CLASS, 5717_PLUS, 57765_CLASS/PLUS).
 */
15176 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15178 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15179 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15182 		/* All devices that use the alternate
15183 		 * ASIC REV location have a CPMU.
15185 		tg3_flag_set(tp, CPMU_PRESENT);
		/* Pick the product-ID register that holds the real ASIC rev
		 * for this device family.
		 */
15187 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15188 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15189 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15190 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15191 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15192 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15193 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15194 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15195 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15196 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15197 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15198 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15199 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15200 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15201 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15202 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15203 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15204 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15205 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15206 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15208 			reg = TG3PCI_PRODID_ASICREV;
15210 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15213 	/* Wrong chip ID in 5752 A0. This code can be removed later
15214 	 * as A0 is not in production.
15216 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15217 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
	/* 5717 C0 silicon identifies as 5720 A0. */
15219 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15220 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
	/* Family flags build on each other from here down. */
15222 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15223 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15224 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15225 		tg3_flag_set(tp, 5717_PLUS);
15227 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15228 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15229 		tg3_flag_set(tp, 57765_CLASS);
15231 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15232 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15233 		tg3_flag_set(tp, 57765_PLUS);
15235 	/* Intentionally exclude ASIC_REV_5906 */
15236 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15237 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15238 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15239 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15240 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15241 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15242 	    tg3_flag(tp, 57765_PLUS))
15243 		tg3_flag_set(tp, 5755_PLUS);
15245 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15246 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15247 		tg3_flag_set(tp, 5780_CLASS);
15249 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15250 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15251 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15252 	    tg3_flag(tp, 5755_PLUS) ||
15253 	    tg3_flag(tp, 5780_CLASS))
15254 		tg3_flag_set(tp, 5750_PLUS);
15256 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15257 	    tg3_flag(tp, 5750_PLUS))
15258 		tg3_flag_set(tp, 5705_PLUS);
/* Decide whether this device supports only 10/100 Mbps: certain 5703
 * board IDs and FET-style PHYs always qualify; otherwise consult the
 * probe-table driver_data flags (with a 5705-specific sub-flag).
 */
15261 static bool tg3_10_100_only_device(struct tg3 *tp,
15262 				   const struct pci_device_id *ent)
15264 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15266 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15267 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15268 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15271 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15272 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15273 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15283 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15286 u32 pci_state_reg, grc_misc_cfg;
15291 /* Force memory write invalidate off. If we leave it on,
15292 * then on 5700_BX chips we have to enable a workaround.
15293 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15294 * to match the cacheline size. The Broadcom driver have this
15295 * workaround but turns MWI off all the times so never uses
15296 * it. This seems to suggest that the workaround is insufficient.
15298 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15299 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15300 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15302 /* Important! -- Make sure register accesses are byteswapped
15303 * correctly. Also, for those chips that require it, make
15304 * sure that indirect register accesses are enabled before
15305 * the first operation.
15307 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15309 tp->misc_host_ctrl |= (misc_ctrl_reg &
15310 MISC_HOST_CTRL_CHIPREV);
15311 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15312 tp->misc_host_ctrl);
15314 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15316 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15317 * we need to disable memory and use config. cycles
15318 * only to access all registers. The 5702/03 chips
15319 * can mistakenly decode the special cycles from the
15320 * ICH chipsets as memory write cycles, causing corruption
15321 * of register and memory space. Only certain ICH bridges
15322 * will drive special cycles with non-zero data during the
15323 * address phase which can fall within the 5703's address
15324 * range. This is not an ICH bug as the PCI spec allows
15325 * non-zero address during special cycles. However, only
15326 * these ICH bridges are known to drive non-zero addresses
15327 * during special cycles.
15329 * Since special cycles do not cross PCI bridges, we only
15330 * enable this workaround if the 5703 is on the secondary
15331 * bus of these ICH bridges.
15333 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15334 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15335 static struct tg3_dev_id {
15339 } ich_chipsets[] = {
15340 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15342 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15344 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15346 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15350 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15351 struct pci_dev *bridge = NULL;
15353 while (pci_id->vendor != 0) {
15354 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15360 if (pci_id->rev != PCI_ANY_ID) {
15361 if (bridge->revision > pci_id->rev)
15364 if (bridge->subordinate &&
15365 (bridge->subordinate->number ==
15366 tp->pdev->bus->number)) {
15367 tg3_flag_set(tp, ICH_WORKAROUND);
15368 pci_dev_put(bridge);
15374 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15375 static struct tg3_dev_id {
15378 } bridge_chipsets[] = {
15379 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15380 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15383 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15384 struct pci_dev *bridge = NULL;
15386 while (pci_id->vendor != 0) {
15387 bridge = pci_get_device(pci_id->vendor,
15394 if (bridge->subordinate &&
15395 (bridge->subordinate->number <=
15396 tp->pdev->bus->number) &&
15397 (bridge->subordinate->busn_res.end >=
15398 tp->pdev->bus->number)) {
15399 tg3_flag_set(tp, 5701_DMA_BUG);
15400 pci_dev_put(bridge);
15406 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15407 * DMA addresses > 40-bit. This bridge may have other additional
15408 * 57xx devices behind it in some 4-port NIC designs for example.
15409 * Any tg3 device found behind the bridge will also need the 40-bit
15412 if (tg3_flag(tp, 5780_CLASS)) {
15413 tg3_flag_set(tp, 40BIT_DMA_BUG);
15414 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15416 struct pci_dev *bridge = NULL;
15419 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15420 PCI_DEVICE_ID_SERVERWORKS_EPB,
15422 if (bridge && bridge->subordinate &&
15423 (bridge->subordinate->number <=
15424 tp->pdev->bus->number) &&
15425 (bridge->subordinate->busn_res.end >=
15426 tp->pdev->bus->number)) {
15427 tg3_flag_set(tp, 40BIT_DMA_BUG);
15428 pci_dev_put(bridge);
15434 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15435 tg3_asic_rev(tp) == ASIC_REV_5714)
15436 tp->pdev_peer = tg3_find_peer(tp);
15438 /* Determine TSO capabilities */
15439 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15440 ; /* Do nothing. HW bug. */
15441 else if (tg3_flag(tp, 57765_PLUS))
15442 tg3_flag_set(tp, HW_TSO_3);
15443 else if (tg3_flag(tp, 5755_PLUS) ||
15444 tg3_asic_rev(tp) == ASIC_REV_5906)
15445 tg3_flag_set(tp, HW_TSO_2);
15446 else if (tg3_flag(tp, 5750_PLUS)) {
15447 tg3_flag_set(tp, HW_TSO_1);
15448 tg3_flag_set(tp, TSO_BUG);
15449 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15450 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15451 tg3_flag_clear(tp, TSO_BUG);
15452 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15453 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15454 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15455 tg3_flag_set(tp, FW_TSO);
15456 tg3_flag_set(tp, TSO_BUG);
15457 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15458 tp->fw_needed = FIRMWARE_TG3TSO5;
15460 tp->fw_needed = FIRMWARE_TG3TSO;
15463 /* Selectively allow TSO based on operating conditions */
15464 if (tg3_flag(tp, HW_TSO_1) ||
15465 tg3_flag(tp, HW_TSO_2) ||
15466 tg3_flag(tp, HW_TSO_3) ||
15467 tg3_flag(tp, FW_TSO)) {
15468 /* For firmware TSO, assume ASF is disabled.
15469 * We'll disable TSO later if we discover ASF
15470 * is enabled in tg3_get_eeprom_hw_cfg().
15472 tg3_flag_set(tp, TSO_CAPABLE);
15474 tg3_flag_clear(tp, TSO_CAPABLE);
15475 tg3_flag_clear(tp, TSO_BUG);
15476 tp->fw_needed = NULL;
15479 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15480 tp->fw_needed = FIRMWARE_TG3;
15482 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15483 tp->fw_needed = FIRMWARE_TG357766;
15487 if (tg3_flag(tp, 5750_PLUS)) {
15488 tg3_flag_set(tp, SUPPORT_MSI);
15489 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15490 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15491 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15492 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15493 tp->pdev_peer == tp->pdev))
15494 tg3_flag_clear(tp, SUPPORT_MSI);
15496 if (tg3_flag(tp, 5755_PLUS) ||
15497 tg3_asic_rev(tp) == ASIC_REV_5906) {
15498 tg3_flag_set(tp, 1SHOT_MSI);
15501 if (tg3_flag(tp, 57765_PLUS)) {
15502 tg3_flag_set(tp, SUPPORT_MSIX);
15503 tp->irq_max = TG3_IRQ_MAX_VECS;
15509 if (tp->irq_max > 1) {
15510 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15511 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15513 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15514 tg3_asic_rev(tp) == ASIC_REV_5720)
15515 tp->txq_max = tp->irq_max - 1;
15518 if (tg3_flag(tp, 5755_PLUS) ||
15519 tg3_asic_rev(tp) == ASIC_REV_5906)
15520 tg3_flag_set(tp, SHORT_DMA_BUG);
15522 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15523 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15525 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15526 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15527 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15528 tg3_asic_rev(tp) == ASIC_REV_5762)
15529 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15531 if (tg3_flag(tp, 57765_PLUS) &&
15532 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15533 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15535 if (!tg3_flag(tp, 5705_PLUS) ||
15536 tg3_flag(tp, 5780_CLASS) ||
15537 tg3_flag(tp, USE_JUMBO_BDFLAG))
15538 tg3_flag_set(tp, JUMBO_CAPABLE);
15540 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15543 if (pci_is_pcie(tp->pdev)) {
15546 tg3_flag_set(tp, PCI_EXPRESS);
15548 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15549 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15550 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15551 tg3_flag_clear(tp, HW_TSO_2);
15552 tg3_flag_clear(tp, TSO_CAPABLE);
15554 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15555 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15556 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15557 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15558 tg3_flag_set(tp, CLKREQ_BUG);
15559 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15560 tg3_flag_set(tp, L1PLLPD_EN);
15562 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15563 /* BCM5785 devices are effectively PCIe devices, and should
15564 * follow PCIe codepaths, but do not have a PCIe capabilities
15567 tg3_flag_set(tp, PCI_EXPRESS);
15568 } else if (!tg3_flag(tp, 5705_PLUS) ||
15569 tg3_flag(tp, 5780_CLASS)) {
15570 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15571 if (!tp->pcix_cap) {
15572 dev_err(&tp->pdev->dev,
15573 "Cannot find PCI-X capability, aborting\n");
15577 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15578 tg3_flag_set(tp, PCIX_MODE);
15581 /* If we have an AMD 762 or VIA K8T800 chipset, write
15582 * reordering to the mailbox registers done by the host
15583 * controller can cause major troubles. We read back from
15584 * every mailbox register write to force the writes to be
15585 * posted to the chip in order.
15587 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15588 !tg3_flag(tp, PCI_EXPRESS))
15589 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15591 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15592 &tp->pci_cacheline_sz);
15593 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15594 &tp->pci_lat_timer);
15595 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15596 tp->pci_lat_timer < 64) {
15597 tp->pci_lat_timer = 64;
15598 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15599 tp->pci_lat_timer);
15602 /* Important! -- It is critical that the PCI-X hw workaround
15603 * situation is decided before the first MMIO register access.
15605 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15606 /* 5700 BX chips need to have their TX producer index
15607 * mailboxes written twice to workaround a bug.
15609 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15611 /* If we are in PCI-X mode, enable register write workaround.
15613 * The workaround is to use indirect register accesses
15614 * for all chip writes not to mailbox registers.
15616 if (tg3_flag(tp, PCIX_MODE)) {
15619 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15621 /* The chip can have it's power management PCI config
15622 * space registers clobbered due to this bug.
15623 * So explicitly force the chip into D0 here.
15625 pci_read_config_dword(tp->pdev,
15626 tp->pm_cap + PCI_PM_CTRL,
15628 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15629 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15630 pci_write_config_dword(tp->pdev,
15631 tp->pm_cap + PCI_PM_CTRL,
15634 /* Also, force SERR#/PERR# in PCI command. */
15635 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15636 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15637 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15641 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15642 tg3_flag_set(tp, PCI_HIGH_SPEED);
15643 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15644 tg3_flag_set(tp, PCI_32BIT);
15646 /* Chip-specific fixup from Broadcom driver */
15647 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15648 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15649 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15650 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15653 /* Default fast path register access methods */
15654 tp->read32 = tg3_read32;
15655 tp->write32 = tg3_write32;
15656 tp->read32_mbox = tg3_read32;
15657 tp->write32_mbox = tg3_write32;
15658 tp->write32_tx_mbox = tg3_write32;
15659 tp->write32_rx_mbox = tg3_write32;
15661 /* Various workaround register access methods */
15662 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15663 tp->write32 = tg3_write_indirect_reg32;
15664 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15665 (tg3_flag(tp, PCI_EXPRESS) &&
15666 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15668 * Back to back register writes can cause problems on these
15669 * chips, the workaround is to read back all reg writes
15670 * except those to mailbox regs.
15672 * See tg3_write_indirect_reg32().
15674 tp->write32 = tg3_write_flush_reg32;
15677 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15678 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15679 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15680 tp->write32_rx_mbox = tg3_write_flush_reg32;
15683 if (tg3_flag(tp, ICH_WORKAROUND)) {
15684 tp->read32 = tg3_read_indirect_reg32;
15685 tp->write32 = tg3_write_indirect_reg32;
15686 tp->read32_mbox = tg3_read_indirect_mbox;
15687 tp->write32_mbox = tg3_write_indirect_mbox;
15688 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15689 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15694 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15695 pci_cmd &= ~PCI_COMMAND_MEMORY;
15696 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15698 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15699 tp->read32_mbox = tg3_read32_mbox_5906;
15700 tp->write32_mbox = tg3_write32_mbox_5906;
15701 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15702 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15705 if (tp->write32 == tg3_write_indirect_reg32 ||
15706 (tg3_flag(tp, PCIX_MODE) &&
15707 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15708 tg3_asic_rev(tp) == ASIC_REV_5701)))
15709 tg3_flag_set(tp, SRAM_USE_CONFIG);
15711 /* The memory arbiter has to be enabled in order for SRAM accesses
15712 * to succeed. Normally on powerup the tg3 chip firmware will make
15713 * sure it is enabled, but other entities such as system netboot
15714 * code might disable it.
15716 val = tr32(MEMARB_MODE);
15717 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15719 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15720 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15721 tg3_flag(tp, 5780_CLASS)) {
15722 if (tg3_flag(tp, PCIX_MODE)) {
15723 pci_read_config_dword(tp->pdev,
15724 tp->pcix_cap + PCI_X_STATUS,
15726 tp->pci_fn = val & 0x7;
15728 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15729 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15730 tg3_asic_rev(tp) == ASIC_REV_5720) {
15731 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15732 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15733 val = tr32(TG3_CPMU_STATUS);
15735 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15736 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15738 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15739 TG3_CPMU_STATUS_FSHFT_5719;
15742 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15743 tp->write32_tx_mbox = tg3_write_flush_reg32;
15744 tp->write32_rx_mbox = tg3_write_flush_reg32;
15747 /* Get eeprom hw config before calling tg3_set_power_state().
15748 * In particular, the TG3_FLAG_IS_NIC flag must be
15749 * determined before calling tg3_set_power_state() so that
15750 * we know whether or not to switch out of Vaux power.
15751 * When the flag is set, it means that GPIO1 is used for eeprom
15752 * write protect and also implies that it is a LOM where GPIOs
15753 * are not used to switch power.
15755 tg3_get_eeprom_hw_cfg(tp);
15757 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15758 tg3_flag_clear(tp, TSO_CAPABLE);
15759 tg3_flag_clear(tp, TSO_BUG);
15760 tp->fw_needed = NULL;
15763 if (tg3_flag(tp, ENABLE_APE)) {
15764 /* Allow reads and writes to the
15765 * APE register and memory space.
15767 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15768 PCISTATE_ALLOW_APE_SHMEM_WR |
15769 PCISTATE_ALLOW_APE_PSPACE_WR;
15770 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15773 tg3_ape_lock_init(tp);
15776 /* Set up tp->grc_local_ctrl before calling
15777 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15778 * will bring 5700's external PHY out of reset.
15779 * It is also used as eeprom write protect on LOMs.
15781 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15782 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15783 tg3_flag(tp, EEPROM_WRITE_PROT))
15784 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15785 GRC_LCLCTRL_GPIO_OUTPUT1);
15786 /* Unused GPIO3 must be driven as output on 5752 because there
15787 * are no pull-up resistors on unused GPIO pins.
15789 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15790 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15792 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15793 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15794 tg3_flag(tp, 57765_CLASS))
15795 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15797 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15798 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15799 /* Turn off the debug UART. */
15800 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15801 if (tg3_flag(tp, IS_NIC))
15802 /* Keep VMain power. */
15803 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15804 GRC_LCLCTRL_GPIO_OUTPUT0;
15807 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15808 tp->grc_local_ctrl |=
15809 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15811 /* Switch out of Vaux if it is a NIC */
15812 tg3_pwrsrc_switch_to_vmain(tp);
15814 /* Derive initial jumbo mode from MTU assigned in
15815 * ether_setup() via the alloc_etherdev() call
15817 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15818 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15820 /* Determine WakeOnLan speed to use. */
15821 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15822 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15823 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15824 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15825 tg3_flag_clear(tp, WOL_SPEED_100MB);
15827 tg3_flag_set(tp, WOL_SPEED_100MB);
15830 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15831 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15833 /* A few boards don't want Ethernet@WireSpeed phy feature */
15834 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15835 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15836 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15837 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15838 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15839 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15840 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15842 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15843 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15844 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15845 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15846 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15848 if (tg3_flag(tp, 5705_PLUS) &&
15849 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15850 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15851 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15852 !tg3_flag(tp, 57765_PLUS)) {
15853 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15854 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15855 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15856 tg3_asic_rev(tp) == ASIC_REV_5761) {
15857 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15858 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15859 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15860 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15861 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15863 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15866 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15867 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15868 tp->phy_otp = tg3_read_otp_phycfg(tp);
15869 if (tp->phy_otp == 0)
15870 tp->phy_otp = TG3_OTP_DEFAULT;
15873 if (tg3_flag(tp, CPMU_PRESENT))
15874 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15876 tp->mi_mode = MAC_MI_MODE_BASE;
15878 tp->coalesce_mode = 0;
15879 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15880 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15881 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15883 /* Set these bits to enable statistics workaround. */
15884 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15885 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15886 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15887 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15888 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15891 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15892 tg3_asic_rev(tp) == ASIC_REV_57780)
15893 tg3_flag_set(tp, USE_PHYLIB);
15895 err = tg3_mdio_init(tp);
15899 /* Initialize data/descriptor byte/word swapping. */
15900 val = tr32(GRC_MODE);
15901 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15902 tg3_asic_rev(tp) == ASIC_REV_5762)
15903 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15904 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15905 GRC_MODE_B2HRX_ENABLE |
15906 GRC_MODE_HTX2B_ENABLE |
15907 GRC_MODE_HOST_STACKUP);
15909 val &= GRC_MODE_HOST_STACKUP;
15911 tw32(GRC_MODE, val | tp->grc_mode);
15913 tg3_switch_clocks(tp);
15915 /* Clear this out for sanity. */
15916 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15918 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15920 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15921 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15922 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15923 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15924 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15925 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15926 void __iomem *sram_base;
15928 /* Write some dummy words into the SRAM status block
15929 * area, see if it reads back correctly. If the return
15930 * value is bad, force enable the PCIX workaround.
15932 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15934 writel(0x00000000, sram_base);
15935 writel(0x00000000, sram_base + 4);
15936 writel(0xffffffff, sram_base + 4);
15937 if (readl(sram_base) != 0x00000000)
15938 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15943 tg3_nvram_init(tp);
15945 /* If the device has an NVRAM, no need to load patch firmware */
15946 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15947 !tg3_flag(tp, NO_NVRAM))
15948 tp->fw_needed = NULL;
15950 grc_misc_cfg = tr32(GRC_MISC_CFG);
15951 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15953 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15954 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15955 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15956 tg3_flag_set(tp, IS_5788);
15958 if (!tg3_flag(tp, IS_5788) &&
15959 tg3_asic_rev(tp) != ASIC_REV_5700)
15960 tg3_flag_set(tp, TAGGED_STATUS);
15961 if (tg3_flag(tp, TAGGED_STATUS)) {
15962 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15963 HOSTCC_MODE_CLRTICK_TXBD);
15965 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15966 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15967 tp->misc_host_ctrl);
15970 /* Preserve the APE MAC_MODE bits */
15971 if (tg3_flag(tp, ENABLE_APE))
15972 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15976 if (tg3_10_100_only_device(tp, ent))
15977 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15979 err = tg3_phy_probe(tp);
15981 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15982 /* ... but do not return immediately ... */
15987 tg3_read_fw_ver(tp);
15989 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15990 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15992 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15993 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15995 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15998 /* 5700 {AX,BX} chips have a broken status block link
15999 * change bit implementation, so we must use the
16000 * status register in those cases.
16002 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16003 tg3_flag_set(tp, USE_LINKCHG_REG);
16005 tg3_flag_clear(tp, USE_LINKCHG_REG);
16007 /* The led_ctrl is set during tg3_phy_probe, here we might
16008 * have to force the link status polling mechanism based
16009 * upon subsystem IDs.
16011 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16012 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16013 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16014 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16015 tg3_flag_set(tp, USE_LINKCHG_REG);
16018 /* For all SERDES we poll the MAC status register. */
16019 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16020 tg3_flag_set(tp, POLL_SERDES);
16022 tg3_flag_clear(tp, POLL_SERDES);
16024 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16025 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16026 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16027 tg3_flag(tp, PCIX_MODE)) {
16028 tp->rx_offset = NET_SKB_PAD;
16029 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16030 tp->rx_copy_thresh = ~(u16)0;
16034 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16035 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16036 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16038 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16040 /* Increment the rx prod index on the rx std ring by at most
16041 * 8 for these chips to workaround hw errata.
16043 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16044 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16045 tg3_asic_rev(tp) == ASIC_REV_5755)
16046 tp->rx_std_max_post = 8;
16048 if (tg3_flag(tp, ASPM_WORKAROUND))
16049 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16050 PCIE_PWR_MGMT_L1_THRESH_MSK;
16055 #ifdef CONFIG_SPARC
/* Try to obtain the MAC address from the OpenFirmware device tree on
 * SPARC, via the standard "local-mac-address" property of this PCI
 * device's OF node.  On success the address is copied into
 * dev->dev_addr.  NOTE(review): success/failure return paths are not
 * visible in this chunk — presumably 0 on success, nonzero otherwise;
 * confirm against the caller in tg3_get_device_address().
 */
16056 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16058 struct net_device *dev = tp->dev;
16059 struct pci_dev *pdev = tp->pdev;
16060 struct device_node *dp = pci_device_to_OF_node(pdev);
16061 const unsigned char *addr;
/* len receives the property size; only a 6-byte (ETH_ALEN) value is
 * accepted as a valid Ethernet address.
 */
16064 addr = of_get_property(dp, "local-mac-address", &len);
16065 if (addr && len == 6) {
16066 memcpy(dev->dev_addr, addr, 6);
/* Last-resort MAC address source on SPARC: copy the machine's IDPROM
 * Ethernet address (6 bytes) into the net_device.  Used only when no
 * per-device address could be found anywhere else.
 */
16072 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16074 struct net_device *dev = tp->dev;
16076 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in decreasing
 * order of preference:
 *   1. SPARC OpenFirmware "local-mac-address" property,
 *   2. the SSB GigE core (for SSB-attached devices),
 *   3. the bootcode's MAC address mailbox in NIC SRAM,
 *   4. NVRAM at a chip-dependent mac_offset,
 *   5. the live MAC_ADDR_0_HIGH/LOW hardware registers,
 *   6. (SPARC only) the machine IDPROM default.
 * Each candidate is checked with is_valid_ether_addr() before being
 * accepted.
 */
16081 static int tg3_get_device_address(struct tg3 *tp)
16083 struct net_device *dev = tp->dev;
16084 u32 hi, lo, mac_offset;
16088 #ifdef CONFIG_SPARC
16089 if (!tg3_get_macaddr_sparc(tp))
16093 if (tg3_flag(tp, IS_SSB_CORE)) {
16094 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16095 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
/* Chip-dependent NVRAM offset selection: dual-MAC 5704/5780-class
 * parts store the second port's address at a different offset, and
 * multi-function 5717+ parts index by PCI function number.
 */
16100 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16101 tg3_flag(tp, 5780_CLASS)) {
16102 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16104 if (tg3_nvram_lock(tp))
16105 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16107 tg3_nvram_unlock(tp);
16108 } else if (tg3_flag(tp, 5717_PLUS)) {
16109 if (tp->pci_fn & 1)
16111 if (tp->pci_fn > 1)
16112 mac_offset += 0x18c;
16113 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16116 /* First try to get it from MAC address mailbox. */
16117 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is ASCII "HK" — presumably the bootcode signature marking a
 * valid address in the mailbox; verify against bootcode docs.
 */
16118 if ((hi >> 16) == 0x484b) {
16119 dev->dev_addr[0] = (hi >> 8) & 0xff;
16120 dev->dev_addr[1] = (hi >> 0) & 0xff;
16122 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16123 dev->dev_addr[2] = (lo >> 24) & 0xff;
16124 dev->dev_addr[3] = (lo >> 16) & 0xff;
16125 dev->dev_addr[4] = (lo >> 8) & 0xff;
16126 dev->dev_addr[5] = (lo >> 0) & 0xff;
16128 /* Some old bootcode may report a 0 MAC address in SRAM */
16129 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16132 /* Next, try NVRAM. */
16133 if (!tg3_flag(tp, NO_NVRAM) &&
16134 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16135 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM words are big-endian; the address is the low 2 bytes of 'hi'
 * followed by all 4 bytes of 'lo'.
 */
16136 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16137 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16139 /* Finally just fetch it out of the MAC control regs. */
16141 hi = tr32(MAC_ADDR_0_HIGH);
16142 lo = tr32(MAC_ADDR_0_LOW);
16144 dev->dev_addr[5] = lo & 0xff;
16145 dev->dev_addr[4] = (lo >> 8) & 0xff;
16146 dev->dev_addr[3] = (lo >> 16) & 0xff;
16147 dev->dev_addr[2] = (lo >> 24) & 0xff;
16148 dev->dev_addr[1] = hi & 0xff;
16149 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Still nothing valid — fall back to the SPARC IDPROM default. */
16153 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16154 #ifdef CONFIG_SPARC
16155 if (!tg3_get_default_macaddr_sparc(tp))
/* Per-architecture DMA burst-boundary goals consumed by
 * tg3_calc_dma_bndry(): restrict bursts to a single cache line, or
 * allow them to span multiple cache lines.  (A goal of 0 means "no
 * restriction".)
 */
16163 #define BOUNDARY_SINGLE_CACHELINE 1
16164 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write burst-boundary bits to merge into the
 * DMA_RW_CTRL value 'val', based on the PCI cache line size register,
 * the host architecture's preferred boundary goal, and the bus type
 * (PCI / PCI-X / PCI Express / 57765+).  Returns the updated value.
 */
16166 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16168 int cacheline_size;
16172 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A cache-line-size register of 0 is treated as "unknown/huge";
 * otherwise the register is in units of 32-bit words, hence *4.
 */
16174 cacheline_size = 1024;
16176 cacheline_size = (int) byte * 4;
16178 /* On 5703 and later chips, the boundary bits have no
16181 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16182 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16183 !tg3_flag(tp, PCI_EXPRESS))
/* Architecture-specific preference: RISC PCI hosts that disconnect on
 * cache-line crossings want bounded bursts.
 */
16186 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16187 goal = BOUNDARY_MULTI_CACHELINE;
16189 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16190 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ parts only expose a single enable/disable alignment bit. */
16196 if (tg3_flag(tp, 57765_PLUS)) {
16197 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16204 /* PCI controllers on most RISC systems tend to disconnect
16205 * when a device tries to burst across a cache-line boundary.
16206 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16208 * Unfortunately, for PCI-E there are only limited
16209 * write-side controls for this, and thus for reads
16210 * we will still get the disconnects. We'll also waste
16211 * these PCI cycles for both read and write for chips
16212 * other than 5700 and 5701 which do not implement the
16215 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16216 switch (cacheline_size) {
16221 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16222 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16223 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16225 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16226 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16231 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16232 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16236 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16237 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCIe only provides write-side boundary control. */
16240 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16241 switch (cacheline_size) {
16245 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16246 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16247 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16253 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16254 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Plain PCI: pick the boundary matching the cache line size. */
16258 switch (cacheline_size) {
16260 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16261 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16262 DMA_RWCTRL_WRITE_BNDRY_16);
16267 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16268 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16269 DMA_RWCTRL_WRITE_BNDRY_32);
16274 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16275 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16276 DMA_RWCTRL_WRITE_BNDRY_64);
16281 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16282 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16283 DMA_RWCTRL_WRITE_BNDRY_128);
16288 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16289 DMA_RWCTRL_WRITE_BNDRY_256);
16292 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16293 DMA_RWCTRL_WRITE_BNDRY_512);
16297 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16298 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one transaction through the chip's internal DMA engine using a
 * hand-built descriptor placed in NIC SRAM.  'to_device' selects the
 * direction: nonzero drives the read-DMA engine (host buffer -> chip),
 * zero drives the write-DMA engine (chip -> host buffer).  Polls the
 * completion FIFOs for the descriptor to come back.  NOTE(review): the
 * timeout/return path is not visible in this chunk — presumably
 * returns -ENODEV if the descriptor never completes; confirm upstream.
 */
16307 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16308 int size, int to_device)
16310 struct tg3_internal_buffer_desc test_desc;
16311 u32 sram_dma_descs;
16314 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA machinery before the test: clear completion FIFOs,
 * engine status, and reset the buffer manager / FTQ state.
 */
16316 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16317 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16318 tw32(RDMAC_STATUS, 0);
16319 tw32(WDMAC_STATUS, 0);
16321 tw32(BUFMGR_MODE, 0);
16322 tw32(FTQ_RESET, 0);
/* Build the internal buffer descriptor pointing at the host buffer. */
16324 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16325 test_desc.addr_lo = buf_dma & 0xffffffff;
16326 test_desc.nic_mbuf = 0x00002100;
16327 test_desc.len = size;
16330 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16331 * the *second* time the tg3 driver was getting loaded after an
16334 * Broadcom tells me:
16335 * ...the DMA engine is connected to the GRC block and a DMA
16336 * reset may affect the GRC block in some unpredictable way...
16337 * The behavior of resets to individual blocks has not been tested.
16339 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion/submission queue IDs differ per direction (values from
 * the Tigon3 internal queue numbering).
 */
16342 test_desc.cqid_sqid = (13 << 8) | 2;
16344 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16347 test_desc.cqid_sqid = (16 << 8) | 7;
16349 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16352 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM word-by-word through the PCI
 * memory window, then close the window.
 */
16354 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16357 val = *(((u32 *)&test_desc) + i);
16358 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16359 sram_dma_descs + (i * sizeof(u32)));
16360 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16362 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the chosen DMA engine by enqueuing the descriptor address. */
16365 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16367 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) for the descriptor to show up in
 * the matching completion FIFO.
 */
16370 for (i = 0; i < 40; i++) {
16374 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16376 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16377 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the coherent buffer used by tg3_test_dma() (8 KiB). */
16388 #define TEST_BUFFER_SIZE 0x2000
/* Host bridges known to expose the 5700/5701 write-DMA reordering bug
 * even when the loopback DMA test passes; tg3_test_dma() forces the
 * conservative 16-byte write boundary when one of these is present.
 */
16390 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16391 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Derive the chip's DMA read/write control word (tp->dma_rwctrl) from
 * bus type and chip revision, then — on 5700/5701 only — run a
 * write/read loopback DMA test against a coherent host buffer to
 * detect the write-reordering erratum, tightening the write boundary
 * to 16 bytes if corruption is observed.  Returns 0 on success or a
 * negative errno (e.g. -ENOMEM if the test buffer cannot be
 * allocated).
 */
16395 static int tg3_test_dma(struct tg3 *tp)
16397 dma_addr_t buf_dma;
16398 u32 *buf, saved_dma_rwctrl;
16401 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16402 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes, then merge in the burst
 * boundary bits computed for this host/bus combination.
 */
16408 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16409 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16411 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16413 if (tg3_flag(tp, 57765_PLUS))
/* Chip/bus-specific watermark values (magic constants from Broadcom's
 * reference settings).
 */
16416 if (tg3_flag(tp, PCI_EXPRESS)) {
16417 /* DMA read watermark not used on PCIE */
16418 tp->dma_rwctrl |= 0x00180000;
16419 } else if (!tg3_flag(tp, PCIX_MODE)) {
16420 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16421 tg3_asic_rev(tp) == ASIC_REV_5750)
16422 tp->dma_rwctrl |= 0x003f0000;
16424 tp->dma_rwctrl |= 0x003f000f;
16426 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16427 tg3_asic_rev(tp) == ASIC_REV_5704) {
16428 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16429 u32 read_water = 0x7;
16431 /* If the 5704 is behind the EPB bridge, we can
16432 * do the less restrictive ONE_DMA workaround for
16433 * better performance.
16435 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16436 tg3_asic_rev(tp) == ASIC_REV_5704)
16437 tp->dma_rwctrl |= 0x8000;
16438 else if (ccval == 0x6 || ccval == 0x7)
16439 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16441 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16443 /* Set bit 23 to enable PCIX hw bug fix */
16445 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16446 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16448 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16449 /* 5780 always in PCIX mode */
16450 tp->dma_rwctrl |= 0x00144000;
16451 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16452 /* 5714 always in PCIX mode */
16453 tp->dma_rwctrl |= 0x00148000;
16455 tp->dma_rwctrl |= 0x001b000f;
16458 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16459 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16461 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16462 tg3_asic_rev(tp) == ASIC_REV_5704)
16463 tp->dma_rwctrl &= 0xfffffff0;
16465 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16466 tg3_asic_rev(tp) == ASIC_REV_5701) {
16467 /* Remove this if it causes problems for some boards. */
16468 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16470 /* On 5700/5701 chips, we need to set this bit.
16471 * Otherwise the chip will issue cacheline transactions
16472 * to streamable DMA memory with not all the byte
16473 * enables turned on. This is an error on several
16474 * RISC PCI controllers, in particular sparc64.
16476 * On 5703/5704 chips, this bit has been reassigned
16477 * a different meaning. In particular, it is used
16478 * on those chips to enable a PCI-X workaround.
16480 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16483 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16486 /* Unneeded, already done by tg3_get_invariants. */
16487 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual loopback test; everything newer
 * skips straight to the exit path.
 */
16490 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16491 tg3_asic_rev(tp) != ASIC_REV_5701)
16494 /* It is best to perform DMA test with maximum write burst size
16495 * to expose the 5700/5701 write DMA bug.
16497 saved_dma_rwctrl = tp->dma_rwctrl;
16498 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16499 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern before each round trip. */
16504 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16507 /* Send the buffer to the chip. */
16508 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16510 dev_err(&tp->pdev->dev,
16511 "%s: Buffer write failed. err = %d\n",
16517 /* validate data reached card RAM correctly. */
16518 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16520 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16521 if (le32_to_cpu(val) != p[i]) {
16522 dev_err(&tp->pdev->dev,
16523 "%s: Buffer corrupted on device! "
16524 "(%d != %d)\n", __func__, val, i);
16525 /* ret = -ENODEV here? */
16530 /* Now read it back. */
16531 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16533 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16534 "err = %d\n", __func__, ret);
/* Compare what came back against the pattern; on mismatch, retry once
 * with the conservative 16-byte write boundary before giving up.
 */
16539 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16543 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16544 DMA_RWCTRL_WRITE_BNDRY_16) {
16545 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16546 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16547 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16550 dev_err(&tp->pdev->dev,
16551 "%s: Buffer corrupted on read back! "
16552 "(%d != %d)\n", __func__, p[i], i);
/* Loop completed without mismatch => this round trip was clean. */
16558 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16564 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16565 DMA_RWCTRL_WRITE_BNDRY_16) {
16566 /* DMA test passed without adjusting DMA boundary,
16567 * now look for chipsets that are known to expose the
16568 * DMA bug without failing the test.
16570 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16571 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16572 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16574 /* Safe to use the calculated DMA boundary. */
16575 tp->dma_rwctrl = saved_dma_rwctrl;
16578 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16582 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Populate tp->bufmgr_config with the buffer-manager memory watermarks
 * (read-DMA low water, MAC RX low water, high water — standard and
 * jumbo variants) appropriate for the chip family: 57765+, 5705+, or
 * the original 5700-class defaults.  The DMA low/high watermarks are
 * the same for all chips.
 */
16587 static void tg3_init_bufmgr_config(struct tg3 *tp)
16589 if (tg3_flag(tp, 57765_PLUS)) {
16590 tp->bufmgr_config.mbuf_read_dma_low_water =
16591 DEFAULT_MB_RDMA_LOW_WATER_5705;
16592 tp->bufmgr_config.mbuf_mac_rx_low_water =
16593 DEFAULT_MB_MACRX_LOW_WATER_57765;
16594 tp->bufmgr_config.mbuf_high_water =
16595 DEFAULT_MB_HIGH_WATER_57765;
16597 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16598 DEFAULT_MB_RDMA_LOW_WATER_5705;
16599 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16600 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16601 tp->bufmgr_config.mbuf_high_water_jumbo =
16602 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16603 } else if (tg3_flag(tp, 5705_PLUS)) {
16604 tp->bufmgr_config.mbuf_read_dma_low_water =
16605 DEFAULT_MB_RDMA_LOW_WATER_5705;
16606 tp->bufmgr_config.mbuf_mac_rx_low_water =
16607 DEFAULT_MB_MACRX_LOW_WATER_5705;
16608 tp->bufmgr_config.mbuf_high_water =
16609 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 has less on-chip memory, so it overrides two of the 5705
 * defaults with smaller values.
 */
16610 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16611 tp->bufmgr_config.mbuf_mac_rx_low_water =
16612 DEFAULT_MB_MACRX_LOW_WATER_5906;
16613 tp->bufmgr_config.mbuf_high_water =
16614 DEFAULT_MB_HIGH_WATER_5906;
16617 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16618 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16619 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16620 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16621 tp->bufmgr_config.mbuf_high_water_jumbo =
16622 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Fallback: original 5700-class defaults. */
16624 tp->bufmgr_config.mbuf_read_dma_low_water =
16625 DEFAULT_MB_RDMA_LOW_WATER;
16626 tp->bufmgr_config.mbuf_mac_rx_low_water =
16627 DEFAULT_MB_MACRX_LOW_WATER;
16628 tp->bufmgr_config.mbuf_high_water =
16629 DEFAULT_MB_HIGH_WATER;
16631 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16632 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16633 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16634 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16635 tp->bufmgr_config.mbuf_high_water_jumbo =
16636 DEFAULT_MB_HIGH_WATER_JUMBO;
16639 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16640 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* tg3_phy_string() - map tp->phy_id (masked with TG3_PHY_ID_MASK) to a
 * human-readable PHY model name for log output.  Returns "serdes" for a
 * zero id and "unknown" for any id not in the table.  The returned
 * pointer is a string literal; callers must not modify or free it.
 */
16643 static char *tg3_phy_string(struct tg3 *tp)
16645 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16646 case TG3_PHY_ID_BCM5400: return "5400";
16647 case TG3_PHY_ID_BCM5401: return "5401";
16648 case TG3_PHY_ID_BCM5411: return "5411";
16649 case TG3_PHY_ID_BCM5701: return "5701";
16650 case TG3_PHY_ID_BCM5703: return "5703";
16651 case TG3_PHY_ID_BCM5704: return "5704";
16652 case TG3_PHY_ID_BCM5705: return "5705";
16653 case TG3_PHY_ID_BCM5750: return "5750";
16654 case TG3_PHY_ID_BCM5752: return "5752";
16655 case TG3_PHY_ID_BCM5714: return "5714";
16656 case TG3_PHY_ID_BCM5780: return "5780";
16657 case TG3_PHY_ID_BCM5755: return "5755";
16658 case TG3_PHY_ID_BCM5787: return "5787";
16659 case TG3_PHY_ID_BCM5784: return "5784";
16660 case TG3_PHY_ID_BCM5756: return "5722/5756";
16661 case TG3_PHY_ID_BCM5906: return "5906";
16662 case TG3_PHY_ID_BCM5761: return "5761";
16663 case TG3_PHY_ID_BCM5718C: return "5718C";
16664 case TG3_PHY_ID_BCM5718S: return "5718S";
16665 case TG3_PHY_ID_BCM57765: return "57765";
16666 case TG3_PHY_ID_BCM5719C: return "5719C";
16667 case TG3_PHY_ID_BCM5720C: return "5720C";
16668 case TG3_PHY_ID_BCM5762: return "5762C";
16669 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16670 case 0: return "serdes";
16671 default: return "unknown";
/* tg3_bus_string() - format the device's bus type/speed/width into the
 * caller-provided buffer @str, e.g. "PCIX:133MHz:64-bit".
 * Branches on the PCI_EXPRESS and PCIX_MODE flags; for PCI-X the clock
 * is decoded from the low bits of TG3PCI_CLOCK_CTRL (7 => 133MHz, also
 * forced for the 5704CIOBE board id; 0/2/4/6 => 33/50/66/100MHz).
 * NOTE(review): the return statement and some else keywords fall in the
 * elided lines of this dump; presumably @str is returned to the caller.
 */
16675 static char *tg3_bus_string(struct tg3 *tp, char *str)
16677 if (tg3_flag(tp, PCI_EXPRESS)) {
16678 strcpy(str, "PCI Express");
16680 } else if (tg3_flag(tp, PCIX_MODE)) {
16681 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16683 strcpy(str, "PCIX:");
16685 if ((clock_ctrl == 7) ||
16686 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16687 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16688 strcat(str, "133MHz");
16689 else if (clock_ctrl == 0)
16690 strcat(str, "33MHz");
16691 else if (clock_ctrl == 2)
16692 strcat(str, "50MHz");
16693 else if (clock_ctrl == 4)
16694 strcat(str, "66MHz");
16695 else if (clock_ctrl == 6)
16696 strcat(str, "100MHz");
/* Plain PCI: speed from the PCI_HIGH_SPEED flag. */
16698 strcpy(str, "PCI:");
16699 if (tg3_flag(tp, PCI_HIGH_SPEED))
16700 strcat(str, "66MHz");
16702 strcat(str, "33MHz");
/* Bus width suffix applies to all bus types. */
16704 if (tg3_flag(tp, PCI_32BIT))
16705 strcat(str, ":32-bit");
16707 strcat(str, ":64-bit");
/* tg3_init_coal() - initialize tp->coal with default ethtool interrupt
 * coalescing parameters: rx/tx tick and max-frame values, the same
 * values for the during-interrupt variants, and the statistics block
 * interval.  Two adjustments follow:
 *  - if the host coalescing mode clears ticks on BD events
 *    (HOSTCC_MODE_CLRTICK_RXBD/TXBD), use the *_CLRTCKS tick values;
 *  - 5705_PLUS chips do not use the irq-context values or the stats
 *    coalescing interval, so those are zeroed.
 */
16711 static void tg3_init_coal(struct tg3 *tp)
16713 struct ethtool_coalesce *ec = &tp->coal;
16715 memset(ec, 0, sizeof(*ec));
16716 ec->cmd = ETHTOOL_GCOALESCE;
16717 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16718 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16719 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16720 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16721 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16722 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16723 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16724 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16725 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16727 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16728 HOSTCC_MODE_CLRTICK_TXBD)) {
16729 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16730 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16731 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16732 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16735 if (tg3_flag(tp, 5705_PLUS)) {
16736 ec->rx_coalesce_usecs_irq = 0;
16737 ec->tx_coalesce_usecs_irq = 0;
16738 ec->stats_block_coalesce_usecs = 0;
/* tg3_init_one() - PCI probe entry point for the tg3 driver.
 *
 * Brings up one Tigon3 device end-to-end: enables the PCI function and
 * claims its regions, maps BAR0 (and BAR2 for APE-capable parts),
 * discovers chip invariants, configures DMA masks and netdev feature
 * flags, resets/tests the DMA engine, initializes per-vector NAPI
 * mailboxes, and finally registers the net_device and logs a summary.
 * Returns 0 on success or a negative errno, unwinding through the
 * err_out_* labels in reverse order of acquisition.
 *
 * NOTE(review): this dump is missing source lines (embedded kernel line
 * numbers skip), so several if-conditions, else keywords, braces, and
 * the success return are not visible here.
 */
16742 static int tg3_init_one(struct pci_dev *pdev,
16743 const struct pci_device_id *ent)
16745 struct net_device *dev;
16747 int i, err, pm_cap;
16748 u32 sndmbx, rcvmbx, intmbx;
16750 u64 dma_mask, persist_dma_mask;
16751 netdev_features_t features = 0;
16753 printk_once(KERN_INFO "%s\n", version);
/* Bring the PCI function up: enable, claim BARs, become bus master. */
16755 err = pci_enable_device(pdev);
16757 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16761 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16763 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16764 goto err_out_disable_pdev;
16767 pci_set_master(pdev);
16769 /* Find power-management capability. */
16770 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16772 dev_err(&pdev->dev,
16773 "Cannot find Power Management capability, aborting\n");
16775 goto err_out_free_res;
/* Force the device into full-power D0 before touching registers. */
16778 err = pci_set_power_state(pdev, PCI_D0);
16780 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16781 goto err_out_free_res;
/* Allocate the net_device with one TX queue per possible IRQ vector. */
16784 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16787 goto err_out_power_down;
16790 SET_NETDEV_DEV(dev, &pdev->dev);
16792 tp = netdev_priv(dev);
16795 tp->pm_cap = pm_cap;
16796 tp->rx_mode = TG3_DEF_RX_MODE;
16797 tp->tx_mode = TG3_DEF_TX_MODE;
16801 tp->msg_enable = tg3_debug;
16803 tp->msg_enable = TG3_DEF_MSG_ENABLE;
/* Extra quirk flags when this is an SSB (Sonics backplane) GigE core. */
16805 if (pdev_is_ssb_gige_core(pdev)) {
16806 tg3_flag_set(tp, IS_SSB_CORE);
16807 if (ssb_gige_must_flush_posted_writes(pdev))
16808 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16809 if (ssb_gige_one_dma_at_once(pdev))
16810 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16811 if (ssb_gige_have_roboswitch(pdev))
16812 tg3_flag_set(tp, ROBOSWITCH);
16813 if (ssb_gige_is_rgmii(pdev))
16814 tg3_flag_set(tp, RGMII_MODE);
16817 /* The word/byte swap controls here control register access byte
16818 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16821 tp->misc_host_ctrl =
16822 MISC_HOST_CTRL_MASK_PCI_INT |
16823 MISC_HOST_CTRL_WORD_SWAP |
16824 MISC_HOST_CTRL_INDIR_ACCESS |
16825 MISC_HOST_CTRL_PCISTATE_RW;
16827 /* The NONFRM (non-frame) byte/word swap controls take effect
16828 * on descriptor entries, anything which isn't packet data.
16830 * The StrongARM chips on the board (one for tx, one for rx)
16831 * are running in big-endian mode.
16833 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16834 GRC_MODE_WSWAP_NONFRM_DATA);
16835 #ifdef __BIG_ENDIAN
16836 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16838 spin_lock_init(&tp->lock);
16839 spin_lock_init(&tp->indirect_lock);
16840 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map BAR0: the main register window. */
16842 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16844 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16846 goto err_out_free_dev;
/* These device ids carry an APE (management processor); map BAR2 too. */
16849 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16850 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16851 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16852 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16853 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16854 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16855 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16856 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16857 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16858 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16860 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16861 tg3_flag_set(tp, ENABLE_APE);
16862 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16863 if (!tp->aperegs) {
16864 dev_err(&pdev->dev,
16865 "Cannot map APE registers, aborting\n");
16867 goto err_out_iounmap;
16871 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16872 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16874 dev->ethtool_ops = &tg3_ethtool_ops;
16875 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16876 dev->netdev_ops = &tg3_netdev_ops;
16877 dev->irq = pdev->irq;
/* Read chip revision/capability data; sets most of the tg3 flags. */
16879 err = tg3_get_invariants(tp, ent);
16881 dev_err(&pdev->dev,
16882 "Problem fetching invariants of chip, aborting\n");
16883 goto err_out_apeunmap;
16886 /* The EPB bridge inside 5714, 5715, and 5780 and any
16887 * device behind the EPB cannot support DMA addresses > 40-bit.
16888 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16889 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16890 * do DMA address check in tg3_start_xmit().
16892 if (tg3_flag(tp, IS_5788))
16893 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16894 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16895 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16896 #ifdef CONFIG_HIGHMEM
16897 dma_mask = DMA_BIT_MASK(64);
16900 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16902 /* Configure DMA attributes. */
16903 if (dma_mask > DMA_BIT_MASK(32)) {
16904 err = pci_set_dma_mask(pdev, dma_mask);
16906 features |= NETIF_F_HIGHDMA;
16907 err = pci_set_consistent_dma_mask(pdev,
16910 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16911 "DMA for consistent allocations\n");
16912 goto err_out_apeunmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
16916 if (err || dma_mask == DMA_BIT_MASK(32)) {
16917 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16919 dev_err(&pdev->dev,
16920 "No usable DMA configuration, aborting\n");
16921 goto err_out_apeunmap;
16925 tg3_init_bufmgr_config(tp);
16927 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16929 /* 5700 B0 chips do not support checksumming correctly due
16930 * to hardware bugs.
16932 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16933 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16935 if (tg3_flag(tp, 5755_PLUS))
16936 features |= NETIF_F_IPV6_CSUM;
16939 /* TSO is on by default on chips that support hardware TSO.
16940 * Firmware TSO on older chips gives lower performance, so it
16941 * is off by default, but can be enabled using ethtool.
16943 if ((tg3_flag(tp, HW_TSO_1) ||
16944 tg3_flag(tp, HW_TSO_2) ||
16945 tg3_flag(tp, HW_TSO_3)) &&
16946 (features & NETIF_F_IP_CSUM))
16947 features |= NETIF_F_TSO;
16948 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16949 if (features & NETIF_F_IPV6_CSUM)
16950 features |= NETIF_F_TSO6;
16951 if (tg3_flag(tp, HW_TSO_3) ||
16952 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16953 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16954 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16955 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16956 tg3_asic_rev(tp) == ASIC_REV_57780)
16957 features |= NETIF_F_TSO_ECN;
16960 dev->features |= features;
16961 dev->vlan_features |= features;
16964 * Add loopback capability only for a subset of devices that support
16965 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16966 * loopback for the remaining devices.
16968 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16969 !tg3_flag(tp, CPMU_PRESENT))
16970 /* Add the loopback capability */
16971 features |= NETIF_F_LOOPBACK;
16973 dev->hw_features |= features;
/* 5705 A1 without TSO on a slow bus: cap the RX ring at 63 entries. */
16975 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16976 !tg3_flag(tp, TSO_CAPABLE) &&
16977 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16978 tg3_flag_set(tp, MAX_RXPEND_64);
16979 tp->rx_pending = 63;
16982 err = tg3_get_device_address(tp);
16984 dev_err(&pdev->dev,
16985 "Could not obtain valid ethernet address, aborting\n");
16986 goto err_out_apeunmap;
16990 * Reset chip in case UNDI or EFI driver did not shutdown
16991 * DMA self test will enable WDMAC and we'll see (spurious)
16992 * pending DMA on the PCI bus at that point.
16994 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16995 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16996 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17000 err = tg3_test_dma(tp);
17002 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17003 goto err_out_apeunmap;
/* Assign interrupt/consumer/producer mailboxes for each NAPI vector. */
17006 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17007 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17008 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17009 for (i = 0; i < tp->irq_max; i++) {
17010 struct tg3_napi *tnapi = &tp->napi[i];
17013 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17015 tnapi->int_mbox = intmbx;
17021 tnapi->consmbox = rcvmbx;
17022 tnapi->prodmbox = sndmbx;
17025 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17027 tnapi->coal_now = HOSTCC_MODE_NOW;
17029 if (!tg3_flag(tp, SUPPORT_MSIX))
17033 * If we support MSIX, we'll be using RSS. If we're using
17034 * RSS, the first vector only handles link interrupts and the
17035 * remaining vectors handle rx and tx interrupts. Reuse the
17036 * mailbox values for the next iteration. The values we setup
17037 * above are still useful for the single vectored mode.
17052 pci_set_drvdata(pdev, dev);
17054 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17055 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17056 tg3_asic_rev(tp) == ASIC_REV_5762)
17057 tg3_flag_set(tp, PTP_CAPABLE);
17059 if (tg3_flag(tp, 5717_PLUS)) {
17060 /* Resume a low-power mode */
17061 tg3_frob_aux_power(tp, false);
17064 tg3_timer_init(tp);
17066 tg3_carrier_off(tp);
/* All hardware checks passed: make the interface visible to the stack. */
17068 err = register_netdev(dev);
17070 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17071 goto err_out_apeunmap;
17074 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17075 tp->board_part_number,
17076 tg3_chip_rev_id(tp),
17077 tg3_bus_string(tp, str),
17080 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17081 struct phy_device *phydev;
17082 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17084 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17085 phydev->drv->name, dev_name(&phydev->dev));
17089 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17090 ethtype = "10/100Base-TX";
17091 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17092 ethtype = "1000Base-SX";
17094 ethtype = "10/100/1000Base-T";
17096 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17097 "(WireSpeed[%d], EEE[%d])\n",
17098 tg3_phy_string(tp), ethtype,
17099 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17100 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17103 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17104 (dev->features & NETIF_F_RXCSUM) != 0,
17105 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17106 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17107 tg3_flag(tp, ENABLE_ASF) != 0,
17108 tg3_flag(tp, TSO_CAPABLE) != 0);
17109 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17111 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17112 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17114 pci_save_state(pdev);
/* Error unwind: labels release resources in reverse acquisition order
 * (APE mapping, registers, net_device, power state, regions, device).
 */
17120 iounmap(tp->aperegs);
17121 tp->aperegs = NULL;
17133 err_out_power_down:
17134 pci_set_power_state(pdev, PCI_D3hot);
17137 pci_release_regions(pdev);
17139 err_out_disable_pdev:
17140 pci_disable_device(pdev);
17141 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one() - PCI remove callback; tears down the device in
 * reverse order of tg3_init_one(): release firmware, cancel the reset
 * task, (if phylib was used) detach the PHY, unregister the netdev,
 * unmap the APE registers, and release/disable the PCI function.
 * NOTE(review): some lines (register unmap, free_netdev) fall in the
 * elided portions of this dump.
 */
17145 static void tg3_remove_one(struct pci_dev *pdev)
17147 struct net_device *dev = pci_get_drvdata(pdev);
17150 struct tg3 *tp = netdev_priv(dev);
17152 release_firmware(tp->fw);
17154 tg3_reset_task_cancel(tp);
17156 if (tg3_flag(tp, USE_PHYLIB)) {
17161 unregister_netdev(dev);
17163 iounmap(tp->aperegs);
17164 tp->aperegs = NULL;
17171 pci_release_regions(pdev);
17172 pci_disable_device(pdev);
17173 pci_set_drvdata(pdev, NULL);
17177 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend() - dev_pm_ops suspend callback.
 * No-op if the interface is down.  Otherwise: cancel any pending reset
 * work, stop NAPI/TX and the periodic timer, disable interrupts, detach
 * the netdev, halt the chip (clearing INIT_COMPLETE), and prepare the
 * hardware for power-down.
 * NOTE(review): the statements from 17209 onward appear to be the
 * failure path that restarts the hardware when tg3_power_down_prepare()
 * fails — the surrounding if/braces are in the elided lines; confirm
 * against the full source.
 */
17178 static int tg3_suspend(struct device *device)
17180 struct pci_dev *pdev = to_pci_dev(device);
17181 struct net_device *dev = pci_get_drvdata(pdev);
17182 struct tg3 *tp = netdev_priv(dev);
17185 if (!netif_running(dev))
17188 tg3_reset_task_cancel(tp);
17190 tg3_netif_stop(tp);
17192 tg3_timer_stop(tp);
17194 tg3_full_lock(tp, 1);
17195 tg3_disable_ints(tp);
17196 tg3_full_unlock(tp);
17198 netif_device_detach(dev);
17200 tg3_full_lock(tp, 0);
17201 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17202 tg3_flag_clear(tp, INIT_COMPLETE);
17203 tg3_full_unlock(tp);
17205 err = tg3_power_down_prepare(tp);
17209 tg3_full_lock(tp, 0);
17211 tg3_flag_set(tp, INIT_COMPLETE);
17212 err2 = tg3_restart_hw(tp, 1);
17216 tg3_timer_start(tp);
17218 netif_device_attach(dev);
17219 tg3_netif_start(tp);
17222 tg3_full_unlock(tp);
/* tg3_resume() - dev_pm_ops resume callback.
 * No-op if the interface is down.  Otherwise re-attach the netdev,
 * mark init complete, restart the hardware, then restart the periodic
 * timer and NAPI/TX, all under the full lock.
 */
17231 static int tg3_resume(struct device *device)
17233 struct pci_dev *pdev = to_pci_dev(device);
17234 struct net_device *dev = pci_get_drvdata(pdev);
17235 struct tg3 *tp = netdev_priv(dev);
17238 if (!netif_running(dev))
17241 netif_device_attach(dev);
17243 tg3_full_lock(tp, 0);
17245 tg3_flag_set(tp, INIT_COMPLETE);
17246 err = tg3_restart_hw(tp, 1);
17250 tg3_timer_start(tp);
17252 tg3_netif_start(tp);
17255 tg3_full_unlock(tp);
/* Wire the suspend/resume callbacks into a dev_pm_ops when
 * CONFIG_PM_SLEEP is set; otherwise the driver registers no PM ops.
 */
17263 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17264 #define TG3_PM_OPS (&tg3_pm_ops)
17268 #define TG3_PM_OPS NULL
17270 #endif /* CONFIG_PM_SLEEP */
17273 * tg3_io_error_detected - called when PCI error is detected
17274 * @pdev: Pointer to PCI device
17275 * @state: The current pci connection state
17277 * This function is called after a PCI bus error affecting
17278 * this device has been detected.
/* tg3_io_error_detected() - AER callback invoked on a PCI bus error.
 * Quiesces the driver (stop NAPI/timer, cancel reset work, detach the
 * netdev, halt the chip without trusting MMIO) and reports
 * PCI_ERS_RESULT_NEED_RESET, or PCI_ERS_RESULT_DISCONNECT when the
 * channel is permanently failed.  Finally disables the PCI device.
 */
17280 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17281 pci_channel_state_t state)
17283 struct net_device *netdev = pci_get_drvdata(pdev);
17284 struct tg3 *tp = netdev_priv(netdev);
17285 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17287 netdev_info(netdev, "PCI I/O error detected\n");
17291 if (!netif_running(netdev))
17296 tg3_netif_stop(tp);
17298 tg3_timer_stop(tp);
17300 /* Want to make sure that the reset task doesn't run */
17301 tg3_reset_task_cancel(tp);
17303 netif_device_detach(netdev);
17305 /* Clean up software state, even if MMIO is blocked */
17306 tg3_full_lock(tp, 0);
17307 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17308 tg3_full_unlock(tp);
17311 if (state == pci_channel_io_perm_failure)
17312 err = PCI_ERS_RESULT_DISCONNECT;
17314 pci_disable_device(pdev);
17322 * tg3_io_slot_reset - called after the pci bus has been reset.
17323 * @pdev: Pointer to PCI device
17325 * Restart the card from scratch, as if from a cold-boot.
17326 * At this point, the card has experienced a hard reset,
17327 * followed by fixups by BIOS, and has its config space
17328 * set up identically to what it was at cold boot.
/* tg3_io_slot_reset() - AER callback after the PCI slot has been reset.
 * Re-enables the device, restores/saves config space, and (when the
 * interface is up) powers the chip back up; returns RECOVERED on
 * success and DISCONNECT otherwise.
 */
17330 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17332 struct net_device *netdev = pci_get_drvdata(pdev);
17333 struct tg3 *tp = netdev_priv(netdev);
17334 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17339 if (pci_enable_device(pdev)) {
17340 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17344 pci_set_master(pdev);
17345 pci_restore_state(pdev);
17346 pci_save_state(pdev);
17348 if (!netif_running(netdev)) {
17349 rc = PCI_ERS_RESULT_RECOVERED;
17353 err = tg3_power_up(tp);
17357 rc = PCI_ERS_RESULT_RECOVERED;
17366 * tg3_io_resume - called when traffic can start flowing again.
17367 * @pdev: Pointer to PCI device
17369 * This callback is called when the error recovery driver tells
17370 * us that its OK to resume normal operation.
/* tg3_io_resume() - AER callback when traffic may flow again.
 * If the interface is up: restart the hardware under the full lock,
 * re-attach the netdev, and restart the timer and NAPI/TX.  Logs and
 * bails out if tg3_restart_hw() fails.
 */
17372 static void tg3_io_resume(struct pci_dev *pdev)
17374 struct net_device *netdev = pci_get_drvdata(pdev);
17375 struct tg3 *tp = netdev_priv(netdev);
17380 if (!netif_running(netdev))
17383 tg3_full_lock(tp, 0);
17384 tg3_flag_set(tp, INIT_COMPLETE);
17385 err = tg3_restart_hw(tp, 1);
17387 tg3_full_unlock(tp);
17388 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17392 netif_device_attach(netdev);
17394 tg3_timer_start(tp);
17396 tg3_netif_start(tp);
17398 tg3_full_unlock(tp);
/* PCI error-recovery (AER) callbacks registered with the PCI core. */
17406 static const struct pci_error_handlers tg3_err_handler = {
17407 .error_detected = tg3_io_error_detected,
17408 .slot_reset = tg3_io_slot_reset,
17409 .resume = tg3_io_resume
/* pci_driver descriptor tying together the id table, probe/remove,
 * AER handlers, and (when CONFIG_PM_SLEEP) the PM ops.
 */
17412 static struct pci_driver tg3_driver = {
17413 .name = DRV_MODULE_NAME,
17414 .id_table = tg3_pci_tbl,
17415 .probe = tg3_init_one,
17416 .remove = tg3_remove_one,
17417 .err_handler = &tg3_err_handler,
17418 .driver.pm = TG3_PM_OPS,
/* Module init: register the PCI driver with the core. */
17421 static int __init tg3_init(void)
17423 return pci_register_driver(&tg3_driver);
/* Module exit: unregister the PCI driver (removes all bound devices). */
17426 static void __exit tg3_cleanup(void)
17428 pci_unregister_driver(&tg3_driver);
/* Register the module entry/exit points with the kernel. */
17431 module_init(tg3_init);
17432 module_exit(tg3_cleanup);