/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		131
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 09, 2013"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
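
/* Illustrative example (not part of the driver): because TG3_TX_RING_SIZE
 * is a compile-time power of two, a "% TG3_TX_RING_SIZE" wrap can be done
 * with a single mask, which is what the comment above and the NEXT_TX()
 * macro rely on.  The helper name below is hypothetical.
 */
static inline u32 tg3_example_next_tx_idx(u32 idx)
{
	/* equivalent to (idx + 1) % TG3_TX_RING_SIZE for power-of-two sizes */
	return (idx + 1) & (TG3_TX_RING_SIZE - 1);
}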
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
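
/* Illustrative sketch (assumed usage, not the driver's actual receive path):
 * the rx code compares a frame's length against TG3_RX_COPY_THRESH(tp) and
 * copies short frames into a fresh skb rather than handing the mapped DMA
 * buffer up the stack.  The helper below is hypothetical.
 */
static inline bool tg3_example_copy_small_frame(struct tg3 *tp,
						unsigned int len)
{
	return len <= TG3_RX_COPY_THRESH(tp);
}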
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
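
/* Illustrative example (not part of the driver): tg3_debug is a bitmap of
 * the NETIF_MSG_* values from <linux/netdevice.h>; the default of -1 means
 * "use TG3_DEF_MSG_ENABLE".  The macro below only shows how a custom value
 * could be composed (NETIF_MSG_DRV | NETIF_MSG_PROBE == 0x0003).
 */
#define TG3_EXAMPLE_DEBUG_BITMAP	(NETIF_MSG_DRV | NETIF_MSG_PROBE)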
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
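
/* Illustrative sketch (assumption about how the table is consumed later in
 * the driver): the .driver_data bits above are read back from the matched
 * pci_device_id entry during probe and translated into PHY capability
 * flags such as TG3_PHYFLG_10_100_ONLY.  The helper below is hypothetical.
 */
static inline bool tg3_example_ent_is_10_100_only(const struct pci_device_id *ent)
{
	return (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) != 0;
}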
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
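
/* Illustrative sketch (assumed shape of the ethtool hook, not the driver's
 * actual implementation): the key tables above are what an ethtool
 * .get_strings() callback copies out verbatim for the stats and self-test
 * string sets.
 */
static inline void tg3_example_copy_test_strings(u8 *buf)
{
	memcpy(buf, ethtool_test_keys, sizeof(ethtool_test_keys));
}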
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
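
/* Illustrative note (assumption about setup performed elsewhere in the
 * driver, e.g. in the probe/invariants code): chips that cannot safely use
 * memory-mapped register access get the indirect helpers installed as the
 * register-access hooks, so tw32()/tr32() transparently go through PCI
 * config space:
 *
 *	tp->write32 = tg3_write_indirect_reg32;
 *	tp->read32  = tg3_read_indirect_reg32;
 */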
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
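
/* Illustrative sketch (assumed usage, not called anywhere in the driver):
 * a caller that toggles GPIO power through GRC_LOCAL_CTRL would use the
 * waiting flush so the TG3_GRC_LCLCTL_PWRSW_DELAY settle time defined
 * earlier is honoured; tw32_f() is the same flush with no extra wait.
 */
static inline void tg3_example_pwrsw_write(struct tg3 *tp, u32 grc_local_ctrl)
{
	tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}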
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
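
/* Illustrative note (assumption: the MII busy-wait loops below poll in
 * roughly 10 usec steps): PHY_BUSY_LOOPS therefore bounds a single MII
 * transaction to about 5000 * 10 usec = 50 ms before the helpers give up.
 */
#define TG3_EXAMPLE_PHY_BUSY_BUDGET_USEC	(PHY_BUSY_LOOPS * 10)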
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
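
/* Illustrative sketch (not part of the driver): a typical read-modify-write
 * of a MII register through the helpers above.  The choice of MII_BMCR and
 * BMCR_LOOPBACK here is only an example.
 */
static inline int tg3_example_phy_set_bits(struct tg3 *tp)
{
	u32 val;
	int err;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		return err;

	return tg3_writephy(tp, MII_BMCR, val | BMCR_LOOPBACK);
}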
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;
	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
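
/* Illustrative example (not used by the driver): a 1000BASE-T advertisement
 * word with only ADVERTISE_PAUSE_CAP set decodes to symmetric flow control.
 */
static inline u32 tg3_example_decode_sym_pause(void)
{
	/* returns FLOW_CTRL_RX | FLOW_CTRL_TX */
	return tg3_decode_flowctrl_1000T(ADVERTISE_PAUSE_CAP);
}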
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1895 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 miireg = ADVERTISE_1000XPAUSE;
1897 else if (flow_ctrl & FLOW_CTRL_TX)
1898 miireg = ADVERTISE_1000XPSE_ASYM;
1899 else if (flow_ctrl & FLOW_CTRL_RX)
1900 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1947 autoneg = tp->link_config.autoneg;
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1955 flowctrl = tp->link_config.flowctrl;
1957 tp->link_config.active_flowctrl = flowctrl;
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1976 static void tg3_adjust_link(struct net_device *dev)
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1983 spin_lock_bh(&tp->lock);
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1988 oldflowctrl = tp->link_config.active_flowctrl;
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2054 spin_unlock_bh(&tp->lock);
2057 tg3_link_report(tp);
2060 static int tg3_phy_init(struct tg3 *tp)
2062 struct phy_device *phydev;
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2067 /* Bring the PHY back to a known state. */
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2087 SUPPORTED_Asym_Pause);
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2094 SUPPORTED_Asym_Pause);
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2103 phydev->advertising = phydev->supported;
2108 static void tg3_phy_start(struct tg3 *tp)
2110 struct phy_device *phydev;
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2127 phy_start_aneg(phydev);
2130 static void tg3_phy_stop(struct tg3 *tp)
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2138 static void tg3_phy_fini(struct tg3 *tp)
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2354 if (!tp->setlpicnt) {
2355 if (current_link_up &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2413 for (chan = 0; chan < 4; chan++) {
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2444 for (i = 0; i < 6; i += 2) {
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2473 for (chan = 0; chan < 4; chan++) {
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2498 err = tg3_bmcr_reset(tp);
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2511 /* Set full-duplex, 1000 mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2532 } while (--retries);
2534 err = tg3_phy_reset_chanpat(tp);
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2556 static void tg3_carrier_off(struct tg3 *tp)
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2569 /* This will reset the tigon3 PHY if there is no valid
2570 * link unless the FORCE argument is non-zero.
2572 static int tg3_phy_reset(struct tg3 *tp)
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2610 err = tg3_bmcr_reset(tp);
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2636 tg3_phy_apply_otp(tp);
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2641 tg3_phy_toggle_apd(tp, false);
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2677 /* Set Extended packet length bit (bit 14) on all chips that */
2678 /* support jumbo frames */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2691 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 * jumbo frames transmission.
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2708 tg3_phy_toggle_automdix(tp, true);
2709 tg3_phy_set_wirespeed(tp);
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
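/* Each PCI function owns a 4-bit slot in the GPIO message word at shift
 * (TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn): bit 0 of the nibble is DRVR_PRES
 * and bit 1 is NEED_VAUX, which is why the ALL_*_MASK macros above test
 * bits 0/4/8/12 and 1/5/9/13 respectively.
 */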
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2754 if (!tg3_flag(tp, IS_NIC))
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2803 if (!tg3_flag(tp, IS_NIC))
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 u32 grc_local_ctrl = 0;
2838 /* Workaround to prevent overdrawing Amps. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2889 msg = tg3_set_function_status(tp, msg);
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2897 tg3_pwrsrc_die_with_vmain(tp);
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2905 bool need_vaux = false;
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2924 /* remove_one() may have been run on the peer. */
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2942 tg3_pwrsrc_switch_to_vaux(tp);
2944 tg3_pwrsrc_die_with_vmain(tp);
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2954 } else if (speed == SPEED_10)
2960 static bool tg3_phy_power_bug(struct tg3 *tp)
2962 switch (tg3_asic_rev(tp)) {
2967 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2976 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2985 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2989 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2992 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2993 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2994 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2995 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2998 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2999 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3000 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3005 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3007 val = tr32(GRC_MISC_CFG);
3008 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3011 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3013 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3016 tg3_writephy(tp, MII_ADVERTISE, 0);
3017 tg3_writephy(tp, MII_BMCR,
3018 BMCR_ANENABLE | BMCR_ANRESTART);
3020 tg3_writephy(tp, MII_TG3_FET_TEST,
3021 phytest | MII_TG3_FET_SHADOW_EN);
3022 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3023 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3025 MII_TG3_FET_SHDW_AUXMODE4,
3028 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3031 } else if (do_low_power) {
3032 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3033 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3035 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3036 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3037 MII_TG3_AUXCTL_PCTL_VREG_11V;
3038 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3041 /* The PHY should not be powered down on some chips because
3044 if (tg3_phy_power_bug(tp))
3047 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3048 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3049 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3050 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3051 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3052 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3055 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3058 /* tp->lock is held. */
3059 static int tg3_nvram_lock(struct tg3 *tp)
3061 if (tg3_flag(tp, NVRAM)) {
3064 if (tp->nvram_lock_cnt == 0) {
3065 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3066 for (i = 0; i < 8000; i++) {
3067 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3072 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3076 tp->nvram_lock_cnt++;
3081 /* tp->lock is held. */
3082 static void tg3_nvram_unlock(struct tg3 *tp)
3084 if (tg3_flag(tp, NVRAM)) {
3085 if (tp->nvram_lock_cnt > 0)
3086 tp->nvram_lock_cnt--;
3087 if (tp->nvram_lock_cnt == 0)
3088 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
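/* Note: nvram_lock_cnt makes the NVRAM lock recursive while tp->lock is
 * held; the SWARB arbitration register is only requested on the first
 * tg3_nvram_lock() and released again on the matching final
 * tg3_nvram_unlock().
 */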
3092 /* tp->lock is held. */
3093 static void tg3_enable_nvram_access(struct tg3 *tp)
3095 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3096 u32 nvaccess = tr32(NVRAM_ACCESS);
3098 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3102 /* tp->lock is held. */
3103 static void tg3_disable_nvram_access(struct tg3 *tp)
3105 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3106 u32 nvaccess = tr32(NVRAM_ACCESS);
3108 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3112 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3113 u32 offset, u32 *val)
3118 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3121 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3122 EEPROM_ADDR_DEVID_MASK |
3124 tw32(GRC_EEPROM_ADDR,
3126 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3127 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3128 EEPROM_ADDR_ADDR_MASK) |
3129 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3131 for (i = 0; i < 1000; i++) {
3132 tmp = tr32(GRC_EEPROM_ADDR);
3134 if (tmp & EEPROM_ADDR_COMPLETE)
3138 if (!(tmp & EEPROM_ADDR_COMPLETE))
3141 tmp = tr32(GRC_EEPROM_DATA);
3144 * The data will always be opposite the native endian
3145 * format. Perform a blind byteswap to compensate.
3152 #define NVRAM_CMD_TIMEOUT 10000
3154 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3158 tw32(NVRAM_CMD, nvram_cmd);
3159 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3161 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3167 if (i == NVRAM_CMD_TIMEOUT)
3173 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3175 if (tg3_flag(tp, NVRAM) &&
3176 tg3_flag(tp, NVRAM_BUFFERED) &&
3177 tg3_flag(tp, FLASH) &&
3178 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3179 (tp->nvram_jedecnum == JEDEC_ATMEL))
3181 addr = ((addr / tp->nvram_pagesize) <<
3182 ATMEL_AT45DB0X1B_PAGE_POS) +
3183 (addr % tp->nvram_pagesize);
3188 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3190 if (tg3_flag(tp, NVRAM) &&
3191 tg3_flag(tp, NVRAM_BUFFERED) &&
3192 tg3_flag(tp, FLASH) &&
3193 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3194 (tp->nvram_jedecnum == JEDEC_ATMEL))
3196 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3197 tp->nvram_pagesize) +
3198 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3203 /* NOTE: Data read in from NVRAM is byteswapped according to
3204 * the byteswapping settings for all other register accesses.
3205 * tg3 devices are BE devices, so on a BE machine, the data
3206 * returned will be exactly as it is seen in NVRAM. On a LE
3207 * machine, the 32-bit value will be byteswapped.
3209 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3213 if (!tg3_flag(tp, NVRAM))
3214 return tg3_nvram_read_using_eeprom(tp, offset, val);
3216 offset = tg3_nvram_phys_addr(tp, offset);
3218 if (offset > NVRAM_ADDR_MSK)
3221 ret = tg3_nvram_lock(tp);
3225 tg3_enable_nvram_access(tp);
3227 tw32(NVRAM_ADDR, offset);
3228 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3229 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3232 *val = tr32(NVRAM_RDDATA);
3234 tg3_disable_nvram_access(tp);
3236 tg3_nvram_unlock(tp);
3241 /* Ensures NVRAM data is in bytestream format. */
3242 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3245 int res = tg3_nvram_read(tp, offset, &v);
3247 *val = cpu_to_be32(v);
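/* Illustrative use (hypothetical offset): callers that need the raw
 * bytestream as stored on the part use the _be32 variant, e.g.
 *
 *	__be32 magic;
 *
 *	if (!tg3_nvram_read_be32(tp, 0x0, &magic))
 *		...	and magic then holds the bytes exactly as stored.
 */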
3251 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3252 u32 offset, u32 len, u8 *buf)
3257 for (i = 0; i < len; i += 4) {
3263 memcpy(&data, buf + i, 4);
3266 * The SEEPROM interface expects the data to always be opposite
3267 * the native endian format. We accomplish this by reversing
3268 * all the operations that would have been performed on the
3269 * data from a call to tg3_nvram_read_be32().
3271 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3273 val = tr32(GRC_EEPROM_ADDR);
3274 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3276 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3278 tw32(GRC_EEPROM_ADDR, val |
3279 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3280 (addr & EEPROM_ADDR_ADDR_MASK) |
3284 for (j = 0; j < 1000; j++) {
3285 val = tr32(GRC_EEPROM_ADDR);
3287 if (val & EEPROM_ADDR_COMPLETE)
3291 if (!(val & EEPROM_ADDR_COMPLETE)) {
3300 /* offset and length are dword aligned */
3301 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3305 u32 pagesize = tp->nvram_pagesize;
3306 u32 pagemask = pagesize - 1;
3310 tmp = kmalloc(pagesize, GFP_KERNEL);
3316 u32 phy_addr, page_off, size;
3318 phy_addr = offset & ~pagemask;
3320 for (j = 0; j < pagesize; j += 4) {
3321 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3322 (__be32 *) (tmp + j));
3329 page_off = offset & pagemask;
3336 memcpy(tmp + page_off, buf, size);
3338 offset = offset + (pagesize - page_off);
3340 tg3_enable_nvram_access(tp);
3343 * Before we can erase the flash page, we need
3344 * to issue a special "write enable" command.
3346 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3348 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3351 /* Erase the target page */
3352 tw32(NVRAM_ADDR, phy_addr);
3354 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3355 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3357 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3360 /* Issue another write enable to start the write. */
3361 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3363 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3366 for (j = 0; j < pagesize; j += 4) {
3369 data = *((__be32 *) (tmp + j));
3371 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3373 tw32(NVRAM_ADDR, phy_addr + j);
3375 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3379 nvram_cmd |= NVRAM_CMD_FIRST;
3380 else if (j == (pagesize - 4))
3381 nvram_cmd |= NVRAM_CMD_LAST;
3383 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3391 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3392 tg3_nvram_exec_cmd(tp, nvram_cmd);
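/* Summary of the unbuffered (page-erase) path above: the affected flash
 * page is read into a bounce buffer, the caller's data is overlaid on it,
 * a write-enable is issued, the page is erased, write-enable is issued
 * again, the page is reprogrammed word by word with FIRST/LAST framing,
 * and finally a write-disable (WRDI) command is sent.
 */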
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3405 for (i = 0; i < len; i += 4, offset += 4) {
3406 u32 page_off, phy_addr, nvram_cmd;
3409 memcpy(&data, buf + i, 4);
3410 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3412 page_off = offset % tp->nvram_pagesize;
3414 phy_addr = tg3_nvram_phys_addr(tp, offset);
3416 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3418 if (page_off == 0 || i == 0)
3419 nvram_cmd |= NVRAM_CMD_FIRST;
3420 if (page_off == (tp->nvram_pagesize - 4))
3421 nvram_cmd |= NVRAM_CMD_LAST;
3424 nvram_cmd |= NVRAM_CMD_LAST;
3426 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3427 !tg3_flag(tp, FLASH) ||
3428 !tg3_flag(tp, 57765_PLUS))
3429 tw32(NVRAM_ADDR, phy_addr);
3431 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3432 !tg3_flag(tp, 5755_PLUS) &&
3433 (tp->nvram_jedecnum == JEDEC_ST) &&
3434 (nvram_cmd & NVRAM_CMD_FIRST)) {
3437 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438 ret = tg3_nvram_exec_cmd(tp, cmd);
3442 if (!tg3_flag(tp, FLASH)) {
3443 /* We always do complete word writes to eeprom. */
3444 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3447 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
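/* In contrast to the unbuffered path, buffered parts need no explicit
 * erase: each dword is written directly, with NVRAM_CMD_FIRST/LAST marking
 * page boundaries and ST parts getting an extra write-enable command at
 * the start of each page.
 */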
3454 /* offset and length are dword aligned */
3455 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3459 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3460 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3461 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3465 if (!tg3_flag(tp, NVRAM)) {
3466 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3470 ret = tg3_nvram_lock(tp);
3474 tg3_enable_nvram_access(tp);
3475 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3476 tw32(NVRAM_WRITE1, 0x406);
3478 grc_mode = tr32(GRC_MODE);
3479 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3481 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3482 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3485 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3489 grc_mode = tr32(GRC_MODE);
3490 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3492 tg3_disable_nvram_access(tp);
3493 tg3_nvram_unlock(tp);
3496 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3497 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
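/* Note: on boards flagged EEPROM_WRITE_PROT, GPIO OUTPUT1 is deasserted
 * around the write (and restored from tp->grc_local_ctrl afterwards),
 * apparently to lift the external write protection for the duration of
 * the update.
 */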
3504 #define RX_CPU_SCRATCH_BASE 0x30000
3505 #define RX_CPU_SCRATCH_SIZE 0x04000
3506 #define TX_CPU_SCRATCH_BASE 0x34000
3507 #define TX_CPU_SCRATCH_SIZE 0x04000
3509 /* tp->lock is held. */
3510 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3513 const int iters = 10000;
3515 for (i = 0; i < iters; i++) {
3516 tw32(cpu_base + CPU_STATE, 0xffffffff);
3517 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3518 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3522 return (i == iters) ? -EBUSY : 0;
3525 /* tp->lock is held. */
3526 static int tg3_rxcpu_pause(struct tg3 *tp)
3528 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3530 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3531 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3537 /* tp->lock is held. */
3538 static int tg3_txcpu_pause(struct tg3 *tp)
3540 return tg3_pause_cpu(tp, TX_CPU_BASE);
3543 /* tp->lock is held. */
3544 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3546 tw32(cpu_base + CPU_STATE, 0xffffffff);
3547 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3550 /* tp->lock is held. */
3551 static void tg3_rxcpu_resume(struct tg3 *tp)
3553 tg3_resume_cpu(tp, RX_CPU_BASE);
3556 /* tp->lock is held. */
3557 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3561 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3563 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3564 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3566 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3569 if (cpu_base == RX_CPU_BASE) {
3570 rc = tg3_rxcpu_pause(tp);
3573 * There is only an Rx CPU for the 5750 derivative in the
3576 if (tg3_flag(tp, IS_SSB_CORE))
3579 rc = tg3_txcpu_pause(tp);
3583 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3584 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3588 /* Clear firmware's nvram arbitration. */
3589 if (tg3_flag(tp, NVRAM))
3590 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3594 static int tg3_fw_data_len(struct tg3 *tp,
3595 const struct tg3_firmware_hdr *fw_hdr)
3599 /* Non-fragmented firmware has one firmware header followed by a
3600 * contiguous chunk of data to be written. The length field in that
3601 * header is not the length of the data to be written but the complete
3602 * length of the bss. The data length is instead derived from
3603 * tp->fw->size minus the header.
3605 * Fragmented firmware has a main header followed by multiple
3606 * fragments. Each fragment looks like a non-fragmented image:
3607 * a firmware header followed by a contiguous chunk of data. In
3608 * the main header, the length field is unused and set to 0xffffffff.
3609 * In each fragment header the length is the entire size of that
3610 * fragment, i.e. fragment data plus header length. The data length is
3611 * therefore the header's length field minus TG3_FW_HDR_LEN.
3613 if (tp->fw_len == 0xffffffff)
3614 fw_len = be32_to_cpu(fw_hdr->len);
3616 fw_len = tp->fw->size;
3618 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
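/* Worked example (hypothetical sizes): a non-fragmented image with
 * tp->fw->size = 0x1000 transfers (0x1000 - TG3_FW_HDR_LEN) / 4 words,
 * while a fragment whose header reports len = 0x800 transfers
 * (0x800 - TG3_FW_HDR_LEN) / 4 words.
 */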
3621 /* tp->lock is held. */
3622 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3623 u32 cpu_scratch_base, int cpu_scratch_size,
3624 const struct tg3_firmware_hdr *fw_hdr)
3627 void (*write_op)(struct tg3 *, u32, u32);
3628 int total_len = tp->fw->size;
3630 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3632 "%s: Trying to load TX cpu firmware which is 5705\n",
3637 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3638 write_op = tg3_write_mem;
3640 write_op = tg3_write_indirect_reg32;
3642 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3643 /* It is possible that bootcode is still loading at this point.
3644 * Get the nvram lock first before halting the cpu.
3646 int lock_err = tg3_nvram_lock(tp);
3647 err = tg3_halt_cpu(tp, cpu_base);
3649 tg3_nvram_unlock(tp);
3653 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3654 write_op(tp, cpu_scratch_base + i, 0);
3655 tw32(cpu_base + CPU_STATE, 0xffffffff);
3656 tw32(cpu_base + CPU_MODE,
3657 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3659 /* Subtract additional main header for fragmented firmware and
3660 * advance to the first fragment
3662 total_len -= TG3_FW_HDR_LEN;
3667 u32 *fw_data = (u32 *)(fw_hdr + 1);
3668 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3669 write_op(tp, cpu_scratch_base +
3670 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3672 be32_to_cpu(fw_data[i]));
3674 total_len -= be32_to_cpu(fw_hdr->len);
3676 /* Advance to next fragment */
3677 fw_hdr = (struct tg3_firmware_hdr *)
3678 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3679 } while (total_len > 0);
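/* The loop above walks the blob fragment by fragment: after each pass
 * fw_hdr advances by that fragment's full length (header plus data) and
 * total_len is reduced accordingly; a non-fragmented image is normally
 * consumed in a single pass.
 */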
3687 /* tp->lock is held. */
3688 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3691 const int iters = 5;
3693 tw32(cpu_base + CPU_STATE, 0xffffffff);
3694 tw32_f(cpu_base + CPU_PC, pc);
3696 for (i = 0; i < iters; i++) {
3697 if (tr32(cpu_base + CPU_PC) == pc)
3699 tw32(cpu_base + CPU_STATE, 0xffffffff);
3700 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3701 tw32_f(cpu_base + CPU_PC, pc);
3705 return (i == iters) ? -EBUSY : 0;
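/* The new PC is written and read back up to iters times; if the CPU has
 * not latched it, the CPU is halted again and the write retried before
 * giving up with -EBUSY.
 */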
3708 /* tp->lock is held. */
3709 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3711 const struct tg3_firmware_hdr *fw_hdr;
3714 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3716 /* The firmware blob starts with version numbers, followed by the
3717 start address and length. The length field holds the complete length:
3718 length = end_address_of_bss - start_address_of_text.
3719 The remainder of the blob is loaded contiguously
3720 from the start address. */
3722 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3723 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3728 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3729 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3734 /* Now startup only the RX cpu. */
3735 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3736 be32_to_cpu(fw_hdr->base_addr));
3738 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3739 "should be %08x\n", __func__,
3740 tr32(RX_CPU_BASE + CPU_PC),
3741 be32_to_cpu(fw_hdr->base_addr));
3745 tg3_rxcpu_resume(tp);
3750 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3752 const int iters = 1000;
3756 /* Wait for boot code to complete initialization and enter service
3757 * loop. It is then safe to download service patches
3759 for (i = 0; i < iters; i++) {
3760 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3767 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3771 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3773 netdev_warn(tp->dev,
3774 "Other patches exist. Not downloading EEE patch\n");
3781 /* tp->lock is held. */
3782 static void tg3_load_57766_firmware(struct tg3 *tp)
3784 struct tg3_firmware_hdr *fw_hdr;
3786 if (!tg3_flag(tp, NO_NVRAM))
3789 if (tg3_validate_rxcpu_state(tp))
3795 /* This firmware blob has a different format from older firmware
3796 * releases, as described below. The main difference is that the
3797 * data is fragmented and written to non-contiguous locations.
3799 * The blob begins with a firmware header identical to other
3800 * firmware, consisting of version, base addr and length. The length
3801 * here is unused and set to 0xffffffff.
3803 * This is followed by a series of firmware fragments, each of which
3804 * is individually identical to older firmware, i.e. a firmware
3805 * header followed by the data for that fragment. The version
3806 * field of each fragment header is unused.
3809 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3810 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3813 if (tg3_rxcpu_pause(tp))
3816 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3817 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3819 tg3_rxcpu_resume(tp);
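/* Note: this EEE service patch is only applied on NVRAM-less (NO_NVRAM)
 * configurations, and only after the boot code has reached its service
 * loop and no other patch has already been handed to it.
 */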
3822 /* tp->lock is held. */
3823 static int tg3_load_tso_firmware(struct tg3 *tp)
3825 const struct tg3_firmware_hdr *fw_hdr;
3826 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3829 if (!tg3_flag(tp, FW_TSO))
3832 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3834 /* The firmware blob starts with version numbers, followed by the
3835 start address and length. The length field holds the complete length:
3836 length = end_address_of_bss - start_address_of_text.
3837 The remainder of the blob is loaded contiguously
3838 from the start address. */
3840 cpu_scratch_size = tp->fw_len;
3842 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3843 cpu_base = RX_CPU_BASE;
3844 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3846 cpu_base = TX_CPU_BASE;
3847 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3848 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3851 err = tg3_load_firmware_cpu(tp, cpu_base,
3852 cpu_scratch_base, cpu_scratch_size,
3857 /* Now startup the cpu. */
3858 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3859 be32_to_cpu(fw_hdr->base_addr));
3862 "%s fails to set CPU PC, is %08x should be %08x\n",
3863 __func__, tr32(cpu_base + CPU_PC),
3864 be32_to_cpu(fw_hdr->base_addr));
3868 tg3_resume_cpu(tp, cpu_base);
3873 /* tp->lock is held. */
3874 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3876 u32 addr_high, addr_low;
3879 addr_high = ((tp->dev->dev_addr[0] << 8) |
3880 tp->dev->dev_addr[1]);
3881 addr_low = ((tp->dev->dev_addr[2] << 24) |
3882 (tp->dev->dev_addr[3] << 16) |
3883 (tp->dev->dev_addr[4] << 8) |
3884 (tp->dev->dev_addr[5] << 0));
3885 for (i = 0; i < 4; i++) {
3886 if (i == 1 && skip_mac_1)
3888 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3889 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3892 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3893 tg3_asic_rev(tp) == ASIC_REV_5704) {
3894 for (i = 0; i < 12; i++) {
3895 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3896 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3900 addr_high = (tp->dev->dev_addr[0] +
3901 tp->dev->dev_addr[1] +
3902 tp->dev->dev_addr[2] +
3903 tp->dev->dev_addr[3] +
3904 tp->dev->dev_addr[4] +
3905 tp->dev->dev_addr[5]) &
3906 TX_BACKOFF_SEED_MASK;
3907 tw32(MAC_TX_BACKOFF_SEED, addr_high);
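/* The station address is mirrored into four MAC_ADDR_* slots (plus the
 * twelve MAC_EXTADDR_* slots on 5703/5704), and the byte sum of the
 * address seeds MAC_TX_BACKOFF_SEED, presumably so that NICs sharing a
 * collision domain pick different half-duplex backoff slots.
 */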
3910 static void tg3_enable_register_access(struct tg3 *tp)
3913 * Make sure register accesses (indirect or otherwise) will function
3916 pci_write_config_dword(tp->pdev,
3917 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3920 static int tg3_power_up(struct tg3 *tp)
3924 tg3_enable_register_access(tp);
3926 err = pci_set_power_state(tp->pdev, PCI_D0);
3928 /* Switch out of Vaux if it is a NIC */
3929 tg3_pwrsrc_switch_to_vmain(tp);
3931 netdev_err(tp->dev, "Transition to D0 failed\n");
3937 static int tg3_setup_phy(struct tg3 *, bool);
3939 static int tg3_power_down_prepare(struct tg3 *tp)
3942 bool device_should_wake, do_low_power;
3944 tg3_enable_register_access(tp);
3946 /* Restore the CLKREQ setting. */
3947 if (tg3_flag(tp, CLKREQ_BUG))
3948 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3949 PCI_EXP_LNKCTL_CLKREQ_EN);
3951 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3952 tw32(TG3PCI_MISC_HOST_CTRL,
3953 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3955 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3956 tg3_flag(tp, WOL_ENABLE);
3958 if (tg3_flag(tp, USE_PHYLIB)) {
3959 do_low_power = false;
3960 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3961 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3962 struct phy_device *phydev;
3963 u32 phyid, advertising;
3965 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3967 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3969 tp->link_config.speed = phydev->speed;
3970 tp->link_config.duplex = phydev->duplex;
3971 tp->link_config.autoneg = phydev->autoneg;
3972 tp->link_config.advertising = phydev->advertising;
3974 advertising = ADVERTISED_TP |
3976 ADVERTISED_Autoneg |
3977 ADVERTISED_10baseT_Half;
3979 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3980 if (tg3_flag(tp, WOL_SPEED_100MB))
3982 ADVERTISED_100baseT_Half |
3983 ADVERTISED_100baseT_Full |
3984 ADVERTISED_10baseT_Full;
3986 advertising |= ADVERTISED_10baseT_Full;
3989 phydev->advertising = advertising;
3991 phy_start_aneg(phydev);
3993 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3994 if (phyid != PHY_ID_BCMAC131) {
3995 phyid &= PHY_BCM_OUI_MASK;
3996 if (phyid == PHY_BCM_OUI_1 ||
3997 phyid == PHY_BCM_OUI_2 ||
3998 phyid == PHY_BCM_OUI_3)
3999 do_low_power = true;
4003 do_low_power = true;
4005 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4006 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4008 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4009 tg3_setup_phy(tp, false);
4012 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4015 val = tr32(GRC_VCPU_EXT_CTRL);
4016 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4017 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4021 for (i = 0; i < 200; i++) {
4022 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4023 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4028 if (tg3_flag(tp, WOL_CAP))
4029 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4030 WOL_DRV_STATE_SHUTDOWN |
4034 if (device_should_wake) {
4037 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4039 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4040 tg3_phy_auxctl_write(tp,
4041 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4042 MII_TG3_AUXCTL_PCTL_WOL_EN |
4043 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4044 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4048 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4049 mac_mode = MAC_MODE_PORT_MODE_GMII;
4050 else if (tp->phy_flags &
4051 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4052 if (tp->link_config.active_speed == SPEED_1000)
4053 mac_mode = MAC_MODE_PORT_MODE_GMII;
4055 mac_mode = MAC_MODE_PORT_MODE_MII;
4057 mac_mode = MAC_MODE_PORT_MODE_MII;
4059 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4060 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4061 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4062 SPEED_100 : SPEED_10;
4063 if (tg3_5700_link_polarity(tp, speed))
4064 mac_mode |= MAC_MODE_LINK_POLARITY;
4066 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4069 mac_mode = MAC_MODE_PORT_MODE_TBI;
4072 if (!tg3_flag(tp, 5750_PLUS))
4073 tw32(MAC_LED_CTRL, tp->led_ctrl);
4075 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4076 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4077 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4078 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4080 if (tg3_flag(tp, ENABLE_APE))
4081 mac_mode |= MAC_MODE_APE_TX_EN |
4082 MAC_MODE_APE_RX_EN |
4083 MAC_MODE_TDE_ENABLE;
4085 tw32_f(MAC_MODE, mac_mode);
4088 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4092 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4093 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4094 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4097 base_val = tp->pci_clock_ctrl;
4098 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4099 CLOCK_CTRL_TXCLK_DISABLE);
4101 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4102 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4103 } else if (tg3_flag(tp, 5780_CLASS) ||
4104 tg3_flag(tp, CPMU_PRESENT) ||
4105 tg3_asic_rev(tp) == ASIC_REV_5906) {
4107 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4108 u32 newbits1, newbits2;
4110 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4111 tg3_asic_rev(tp) == ASIC_REV_5701) {
4112 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4113 CLOCK_CTRL_TXCLK_DISABLE |
4115 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4116 } else if (tg3_flag(tp, 5705_PLUS)) {
4117 newbits1 = CLOCK_CTRL_625_CORE;
4118 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4120 newbits1 = CLOCK_CTRL_ALTCLK;
4121 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4124 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4127 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4130 if (!tg3_flag(tp, 5705_PLUS)) {
4133 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4134 tg3_asic_rev(tp) == ASIC_REV_5701) {
4135 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE |
4137 CLOCK_CTRL_44MHZ_CORE);
4139 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4142 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4143 tp->pci_clock_ctrl | newbits3, 40);
4147 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4148 tg3_power_down_phy(tp, do_low_power);
4150 tg3_frob_aux_power(tp, true);
4152 /* Workaround for unstable PLL clock */
4153 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4154 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4155 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4156 u32 val = tr32(0x7d00);
4158 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4160 if (!tg3_flag(tp, ENABLE_ASF)) {
4163 err = tg3_nvram_lock(tp);
4164 tg3_halt_cpu(tp, RX_CPU_BASE);
4166 tg3_nvram_unlock(tp);
4170 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4175 static void tg3_power_down(struct tg3 *tp)
4177 tg3_power_down_prepare(tp);
4179 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4180 pci_set_power_state(tp->pdev, PCI_D3hot);
4183 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4185 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4186 case MII_TG3_AUX_STAT_10HALF:
4188 *duplex = DUPLEX_HALF;
4191 case MII_TG3_AUX_STAT_10FULL:
4193 *duplex = DUPLEX_FULL;
4196 case MII_TG3_AUX_STAT_100HALF:
4198 *duplex = DUPLEX_HALF;
4201 case MII_TG3_AUX_STAT_100FULL:
4203 *duplex = DUPLEX_FULL;
4206 case MII_TG3_AUX_STAT_1000HALF:
4207 *speed = SPEED_1000;
4208 *duplex = DUPLEX_HALF;
4211 case MII_TG3_AUX_STAT_1000FULL:
4212 *speed = SPEED_1000;
4213 *duplex = DUPLEX_FULL;
4217 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4218 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4220 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4224 *speed = SPEED_UNKNOWN;
4225 *duplex = DUPLEX_UNKNOWN;
4230 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4235 new_adv = ADVERTISE_CSMA;
4236 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4237 new_adv |= mii_advertise_flowctrl(flowctrl);
4239 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4243 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4244 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4246 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4247 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4248 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4250 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4255 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4258 tw32(TG3_CPMU_EEE_MODE,
4259 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4261 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4266 /* Advertise 100-BaseTX EEE ability */
4267 if (advertise & ADVERTISED_100baseT_Full)
4268 val |= MDIO_AN_EEE_ADV_100TX;
4269 /* Advertise 1000-BaseT EEE ability */
4270 if (advertise & ADVERTISED_1000baseT_Full)
4271 val |= MDIO_AN_EEE_ADV_1000T;
4272 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4276 switch (tg3_asic_rev(tp)) {
4278 case ASIC_REV_57765:
4279 case ASIC_REV_57766:
4281 /* If we advertised any eee advertisements above... */
4283 val = MII_TG3_DSP_TAP26_ALNOKO |
4284 MII_TG3_DSP_TAP26_RMRXSTO |
4285 MII_TG3_DSP_TAP26_OPCSINPT;
4286 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4290 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4291 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4292 MII_TG3_DSP_CH34TP2_HIBW01);
4295 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4304 static void tg3_phy_copper_begin(struct tg3 *tp)
4306 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4307 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4310 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4311 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4312 adv = ADVERTISED_10baseT_Half |
4313 ADVERTISED_10baseT_Full;
4314 if (tg3_flag(tp, WOL_SPEED_100MB))
4315 adv |= ADVERTISED_100baseT_Half |
4316 ADVERTISED_100baseT_Full;
4317 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4318 adv |= ADVERTISED_1000baseT_Half |
4319 ADVERTISED_1000baseT_Full;
4321 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4323 adv = tp->link_config.advertising;
4324 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4325 adv &= ~(ADVERTISED_1000baseT_Half |
4326 ADVERTISED_1000baseT_Full);
4328 fc = tp->link_config.flowctrl;
4331 tg3_phy_autoneg_cfg(tp, adv, fc);
4333 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4334 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4335 /* Normally during power down we want to autonegotiate
4336 * the lowest possible speed for WOL. However, to avoid
4337 * link flap, we leave it untouched.
4342 tg3_writephy(tp, MII_BMCR,
4343 BMCR_ANENABLE | BMCR_ANRESTART);
4346 u32 bmcr, orig_bmcr;
4348 tp->link_config.active_speed = tp->link_config.speed;
4349 tp->link_config.active_duplex = tp->link_config.duplex;
4351 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4352 /* With autoneg disabled, 5715 only links up when the
4353 * advertisement register has the configured speed
4356 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4360 switch (tp->link_config.speed) {
4366 bmcr |= BMCR_SPEED100;
4370 bmcr |= BMCR_SPEED1000;
4374 if (tp->link_config.duplex == DUPLEX_FULL)
4375 bmcr |= BMCR_FULLDPLX;
4377 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4378 (bmcr != orig_bmcr)) {
4379 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4380 for (i = 0; i < 1500; i++) {
4384 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4385 tg3_readphy(tp, MII_BMSR, &tmp))
4387 if (!(tmp & BMSR_LSTATUS)) {
4392 tg3_writephy(tp, MII_BMCR, bmcr);
4398 static int tg3_phy_pull_config(struct tg3 *tp)
4403 err = tg3_readphy(tp, MII_BMCR, &val);
4407 if (!(val & BMCR_ANENABLE)) {
4408 tp->link_config.autoneg = AUTONEG_DISABLE;
4409 tp->link_config.advertising = 0;
4410 tg3_flag_clear(tp, PAUSE_AUTONEG);
4414 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4416 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4419 tp->link_config.speed = SPEED_10;
4422 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4425 tp->link_config.speed = SPEED_100;
4427 case BMCR_SPEED1000:
4428 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4429 tp->link_config.speed = SPEED_1000;
4437 if (val & BMCR_FULLDPLX)
4438 tp->link_config.duplex = DUPLEX_FULL;
4440 tp->link_config.duplex = DUPLEX_HALF;
4442 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4448 tp->link_config.autoneg = AUTONEG_ENABLE;
4449 tp->link_config.advertising = ADVERTISED_Autoneg;
4450 tg3_flag_set(tp, PAUSE_AUTONEG);
4452 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4455 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4459 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4460 tp->link_config.advertising |= adv | ADVERTISED_TP;
4462 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4464 tp->link_config.advertising |= ADVERTISED_FIBRE;
4467 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4470 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4471 err = tg3_readphy(tp, MII_CTRL1000, &val);
4475 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4477 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4481 adv = tg3_decode_flowctrl_1000X(val);
4482 tp->link_config.flowctrl = adv;
4484 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4485 adv = mii_adv_to_ethtool_adv_x(val);
4488 tp->link_config.advertising |= adv;
4495 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4499 /* Turn off tap power management. */
4500 /* Set Extended packet length bit */
4501 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4503 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4504 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4505 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4506 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4507 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4514 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4518 u32 advertising = tp->link_config.advertising;
4520 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4523 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4526 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4529 if (advertising & ADVERTISED_100baseT_Full)
4530 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4531 if (advertising & ADVERTISED_1000baseT_Full)
4532 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4540 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4542 u32 advmsk, tgtadv, advertising;
4544 advertising = tp->link_config.advertising;
4545 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4547 advmsk = ADVERTISE_ALL;
4548 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4549 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4550 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4553 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4556 if ((*lcladv & advmsk) != tgtadv)
4559 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4564 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4568 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4569 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4570 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4571 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4572 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4574 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4577 if (tg3_ctrl != tgtadv)
4584 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4588 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4591 if (tg3_readphy(tp, MII_STAT1000, &val))
4594 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4597 if (tg3_readphy(tp, MII_LPA, rmtadv))
4600 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4601 tp->link_config.rmt_adv = lpeth;
4606 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4608 if (curr_link_up != tp->link_up) {
4610 netif_carrier_on(tp->dev);
4612 netif_carrier_off(tp->dev);
4613 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4614 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4617 tg3_link_report(tp);
4624 static void tg3_clear_mac_status(struct tg3 *tp)
4629 MAC_STATUS_SYNC_CHANGED |
4630 MAC_STATUS_CFG_CHANGED |
4631 MAC_STATUS_MI_COMPLETION |
4632 MAC_STATUS_LNKSTATE_CHANGED);
4636 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4638 bool current_link_up;
4640 u32 lcl_adv, rmt_adv;
4645 tg3_clear_mac_status(tp);
4647 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4649 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4653 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4655 /* Some third-party PHYs need to be reset on link going
4658 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4659 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4660 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4662 tg3_readphy(tp, MII_BMSR, &bmsr);
4663 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4664 !(bmsr & BMSR_LSTATUS))
4670 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4671 tg3_readphy(tp, MII_BMSR, &bmsr);
4672 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4673 !tg3_flag(tp, INIT_COMPLETE))
4676 if (!(bmsr & BMSR_LSTATUS)) {
4677 err = tg3_init_5401phy_dsp(tp);
4681 tg3_readphy(tp, MII_BMSR, &bmsr);
4682 for (i = 0; i < 1000; i++) {
4684 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4685 (bmsr & BMSR_LSTATUS)) {
4691 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4692 TG3_PHY_REV_BCM5401_B0 &&
4693 !(bmsr & BMSR_LSTATUS) &&
4694 tp->link_config.active_speed == SPEED_1000) {
4695 err = tg3_phy_reset(tp);
4697 err = tg3_init_5401phy_dsp(tp);
4702 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4703 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4704 /* 5701 {A0,B0} CRC bug workaround */
4705 tg3_writephy(tp, 0x15, 0x0a75);
4706 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4711 /* Clear pending interrupts... */
4712 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4713 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4715 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4716 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4717 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4718 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4720 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4721 tg3_asic_rev(tp) == ASIC_REV_5701) {
4722 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4723 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4724 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4726 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4729 current_link_up = false;
4730 current_speed = SPEED_UNKNOWN;
4731 current_duplex = DUPLEX_UNKNOWN;
4732 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4733 tp->link_config.rmt_adv = 0;
4735 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4736 err = tg3_phy_auxctl_read(tp,
4737 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4739 if (!err && !(val & (1 << 10))) {
4740 tg3_phy_auxctl_write(tp,
4741 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4748 for (i = 0; i < 100; i++) {
4749 tg3_readphy(tp, MII_BMSR, &bmsr);
4750 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4751 (bmsr & BMSR_LSTATUS))
4756 if (bmsr & BMSR_LSTATUS) {
4759 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4760 for (i = 0; i < 2000; i++) {
4762 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4767 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4772 for (i = 0; i < 200; i++) {
4773 tg3_readphy(tp, MII_BMCR, &bmcr);
4774 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4776 if (bmcr && bmcr != 0x7fff)
4784 tp->link_config.active_speed = current_speed;
4785 tp->link_config.active_duplex = current_duplex;
4787 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4788 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4790 if ((bmcr & BMCR_ANENABLE) &&
4792 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4793 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4794 current_link_up = true;
4796 /* EEE settings changes take effect only after a phy
4797 * reset. If we have skipped a reset due to Link Flap
4798 * Avoidance being enabled, do it now.
4800 if (!eee_config_ok &&
4801 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4805 if (!(bmcr & BMCR_ANENABLE) &&
4806 tp->link_config.speed == current_speed &&
4807 tp->link_config.duplex == current_duplex) {
4808 current_link_up = true;
4812 if (current_link_up &&
4813 tp->link_config.active_duplex == DUPLEX_FULL) {
4816 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4817 reg = MII_TG3_FET_GEN_STAT;
4818 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4820 reg = MII_TG3_EXT_STAT;
4821 bit = MII_TG3_EXT_STAT_MDIX;
4824 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4825 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4827 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4832 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4833 tg3_phy_copper_begin(tp);
4835 if (tg3_flag(tp, ROBOSWITCH)) {
4836 current_link_up = true;
4837 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4838 current_speed = SPEED_1000;
4839 current_duplex = DUPLEX_FULL;
4840 tp->link_config.active_speed = current_speed;
4841 tp->link_config.active_duplex = current_duplex;
4844 tg3_readphy(tp, MII_BMSR, &bmsr);
4845 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4846 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4847 current_link_up = true;
4850 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4851 if (current_link_up) {
4852 if (tp->link_config.active_speed == SPEED_100 ||
4853 tp->link_config.active_speed == SPEED_10)
4854 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4857 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4858 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4860 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4862 /* In order for the 5750 core in the BCM4785 chip to work properly
4863 * in RGMII mode, the LED Control Register must be set up.
4865 if (tg3_flag(tp, RGMII_MODE)) {
4866 u32 led_ctrl = tr32(MAC_LED_CTRL);
4867 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4869 if (tp->link_config.active_speed == SPEED_10)
4870 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4871 else if (tp->link_config.active_speed == SPEED_100)
4872 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4873 LED_CTRL_100MBPS_ON);
4874 else if (tp->link_config.active_speed == SPEED_1000)
4875 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4876 LED_CTRL_1000MBPS_ON);
4878 tw32(MAC_LED_CTRL, led_ctrl);
4882 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4883 if (tp->link_config.active_duplex == DUPLEX_HALF)
4884 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4886 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4887 if (current_link_up &&
4888 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4889 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4891 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4894 /* ??? Without this setting Netgear GA302T PHY does not
4895 * ??? send/receive packets...
4897 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4898 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4899 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4900 tw32_f(MAC_MI_MODE, tp->mi_mode);
4904 tw32_f(MAC_MODE, tp->mac_mode);
4907 tg3_phy_eee_adjust(tp, current_link_up);
4909 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4910 /* Polled via timer. */
4911 tw32_f(MAC_EVENT, 0);
4913 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4917 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4919 tp->link_config.active_speed == SPEED_1000 &&
4920 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4923 (MAC_STATUS_SYNC_CHANGED |
4924 MAC_STATUS_CFG_CHANGED));
4927 NIC_SRAM_FIRMWARE_MBOX,
4928 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4931 /* Prevent send BD corruption. */
4932 if (tg3_flag(tp, CLKREQ_BUG)) {
4933 if (tp->link_config.active_speed == SPEED_100 ||
4934 tp->link_config.active_speed == SPEED_10)
4935 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4936 PCI_EXP_LNKCTL_CLKREQ_EN);
4938 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4939 PCI_EXP_LNKCTL_CLKREQ_EN);
4942 tg3_test_and_report_link_chg(tp, current_link_up);
4947 struct tg3_fiber_aneginfo {
4949 #define ANEG_STATE_UNKNOWN 0
4950 #define ANEG_STATE_AN_ENABLE 1
4951 #define ANEG_STATE_RESTART_INIT 2
4952 #define ANEG_STATE_RESTART 3
4953 #define ANEG_STATE_DISABLE_LINK_OK 4
4954 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4955 #define ANEG_STATE_ABILITY_DETECT 6
4956 #define ANEG_STATE_ACK_DETECT_INIT 7
4957 #define ANEG_STATE_ACK_DETECT 8
4958 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4959 #define ANEG_STATE_COMPLETE_ACK 10
4960 #define ANEG_STATE_IDLE_DETECT_INIT 11
4961 #define ANEG_STATE_IDLE_DETECT 12
4962 #define ANEG_STATE_LINK_OK 13
4963 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4964 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4967 #define MR_AN_ENABLE 0x00000001
4968 #define MR_RESTART_AN 0x00000002
4969 #define MR_AN_COMPLETE 0x00000004
4970 #define MR_PAGE_RX 0x00000008
4971 #define MR_NP_LOADED 0x00000010
4972 #define MR_TOGGLE_TX 0x00000020
4973 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4974 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4975 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4976 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4977 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4978 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4979 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4980 #define MR_TOGGLE_RX 0x00002000
4981 #define MR_NP_RX 0x00004000
4983 #define MR_LINK_OK 0x80000000
4985 unsigned long link_time, cur_time;
4987 u32 ability_match_cfg;
4988 int ability_match_count;
4990 char ability_match, idle_match, ack_match;
4992 u32 txconfig, rxconfig;
4993 #define ANEG_CFG_NP 0x00000080
4994 #define ANEG_CFG_ACK 0x00000040
4995 #define ANEG_CFG_RF2 0x00000020
4996 #define ANEG_CFG_RF1 0x00000010
4997 #define ANEG_CFG_PS2 0x00000001
4998 #define ANEG_CFG_PS1 0x00008000
4999 #define ANEG_CFG_HD 0x00004000
5000 #define ANEG_CFG_FD 0x00002000
5001 #define ANEG_CFG_INVAL 0x00001f06
5006 #define ANEG_TIMER_ENAB 2
5007 #define ANEG_FAILED -1
5009 #define ANEG_STATE_SETTLE_TIME 10000
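/* Rough sketch of the usual successful walk through the state machine
 * below (summary derived from the switch cases in
 * tg3_fiber_aneg_smachine(); illustrative only):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * If the partner's config words disappear along the way (ability_match
 * set while rxconfig == 0), the machine falls back to AN_ENABLE and
 * the negotiation restarts.
 */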
5011 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5012 struct tg3_fiber_aneginfo *ap)
5015 unsigned long delta;
5019 if (ap->state == ANEG_STATE_UNKNOWN) {
5023 ap->ability_match_cfg = 0;
5024 ap->ability_match_count = 0;
5025 ap->ability_match = 0;
5031 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5032 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5034 if (rx_cfg_reg != ap->ability_match_cfg) {
5035 ap->ability_match_cfg = rx_cfg_reg;
5036 ap->ability_match = 0;
5037 ap->ability_match_count = 0;
5039 if (++ap->ability_match_count > 1) {
5040 ap->ability_match = 1;
5041 ap->ability_match_cfg = rx_cfg_reg;
5044 if (rx_cfg_reg & ANEG_CFG_ACK)
5052 ap->ability_match_cfg = 0;
5053 ap->ability_match_count = 0;
5054 ap->ability_match = 0;
5060 ap->rxconfig = rx_cfg_reg;
5063 switch (ap->state) {
5064 case ANEG_STATE_UNKNOWN:
5065 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5066 ap->state = ANEG_STATE_AN_ENABLE;
5069 case ANEG_STATE_AN_ENABLE:
5070 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5071 if (ap->flags & MR_AN_ENABLE) {
5074 ap->ability_match_cfg = 0;
5075 ap->ability_match_count = 0;
5076 ap->ability_match = 0;
5080 ap->state = ANEG_STATE_RESTART_INIT;
5082 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5086 case ANEG_STATE_RESTART_INIT:
5087 ap->link_time = ap->cur_time;
5088 ap->flags &= ~(MR_NP_LOADED);
5090 tw32(MAC_TX_AUTO_NEG, 0);
5091 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5092 tw32_f(MAC_MODE, tp->mac_mode);
5095 ret = ANEG_TIMER_ENAB;
5096 ap->state = ANEG_STATE_RESTART;
5099 case ANEG_STATE_RESTART:
5100 delta = ap->cur_time - ap->link_time;
5101 if (delta > ANEG_STATE_SETTLE_TIME)
5102 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5104 ret = ANEG_TIMER_ENAB;
5107 case ANEG_STATE_DISABLE_LINK_OK:
5111 case ANEG_STATE_ABILITY_DETECT_INIT:
5112 ap->flags &= ~(MR_TOGGLE_TX);
5113 ap->txconfig = ANEG_CFG_FD;
5114 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5115 if (flowctrl & ADVERTISE_1000XPAUSE)
5116 ap->txconfig |= ANEG_CFG_PS1;
5117 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5118 ap->txconfig |= ANEG_CFG_PS2;
5119 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5120 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5121 tw32_f(MAC_MODE, tp->mac_mode);
5124 ap->state = ANEG_STATE_ABILITY_DETECT;
5127 case ANEG_STATE_ABILITY_DETECT:
5128 if (ap->ability_match != 0 && ap->rxconfig != 0)
5129 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5132 case ANEG_STATE_ACK_DETECT_INIT:
5133 ap->txconfig |= ANEG_CFG_ACK;
5134 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5135 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5136 tw32_f(MAC_MODE, tp->mac_mode);
5139 ap->state = ANEG_STATE_ACK_DETECT;
5142 case ANEG_STATE_ACK_DETECT:
5143 if (ap->ack_match != 0) {
5144 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5145 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5146 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5148 ap->state = ANEG_STATE_AN_ENABLE;
5150 } else if (ap->ability_match != 0 &&
5151 ap->rxconfig == 0) {
5152 ap->state = ANEG_STATE_AN_ENABLE;
5156 case ANEG_STATE_COMPLETE_ACK_INIT:
5157 if (ap->rxconfig & ANEG_CFG_INVAL) {
5161 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5162 MR_LP_ADV_HALF_DUPLEX |
5163 MR_LP_ADV_SYM_PAUSE |
5164 MR_LP_ADV_ASYM_PAUSE |
5165 MR_LP_ADV_REMOTE_FAULT1 |
5166 MR_LP_ADV_REMOTE_FAULT2 |
5167 MR_LP_ADV_NEXT_PAGE |
5170 if (ap->rxconfig & ANEG_CFG_FD)
5171 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5172 if (ap->rxconfig & ANEG_CFG_HD)
5173 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5174 if (ap->rxconfig & ANEG_CFG_PS1)
5175 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5176 if (ap->rxconfig & ANEG_CFG_PS2)
5177 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5178 if (ap->rxconfig & ANEG_CFG_RF1)
5179 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5180 if (ap->rxconfig & ANEG_CFG_RF2)
5181 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5182 if (ap->rxconfig & ANEG_CFG_NP)
5183 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5185 ap->link_time = ap->cur_time;
5187 ap->flags ^= (MR_TOGGLE_TX);
5188 if (ap->rxconfig & 0x0008)
5189 ap->flags |= MR_TOGGLE_RX;
5190 if (ap->rxconfig & ANEG_CFG_NP)
5191 ap->flags |= MR_NP_RX;
5192 ap->flags |= MR_PAGE_RX;
5194 ap->state = ANEG_STATE_COMPLETE_ACK;
5195 ret = ANEG_TIMER_ENAB;
5198 case ANEG_STATE_COMPLETE_ACK:
5199 if (ap->ability_match != 0 &&
5200 ap->rxconfig == 0) {
5201 ap->state = ANEG_STATE_AN_ENABLE;
5204 delta = ap->cur_time - ap->link_time;
5205 if (delta > ANEG_STATE_SETTLE_TIME) {
5206 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5207 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5209 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5210 !(ap->flags & MR_NP_RX)) {
5211 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5219 case ANEG_STATE_IDLE_DETECT_INIT:
5220 ap->link_time = ap->cur_time;
5221 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5222 tw32_f(MAC_MODE, tp->mac_mode);
5225 ap->state = ANEG_STATE_IDLE_DETECT;
5226 ret = ANEG_TIMER_ENAB;
5229 case ANEG_STATE_IDLE_DETECT:
5230 if (ap->ability_match != 0 &&
5231 ap->rxconfig == 0) {
5232 ap->state = ANEG_STATE_AN_ENABLE;
5235 delta = ap->cur_time - ap->link_time;
5236 if (delta > ANEG_STATE_SETTLE_TIME) {
5237 /* XXX another gem from the Broadcom driver :( */
5238 ap->state = ANEG_STATE_LINK_OK;
5242 case ANEG_STATE_LINK_OK:
5243 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5247 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5248 /* ??? unimplemented */
5251 case ANEG_STATE_NEXT_PAGE_WAIT:
5252 /* ??? unimplemented */
5263 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5266 struct tg3_fiber_aneginfo aninfo;
5267 int status = ANEG_FAILED;
5271 tw32_f(MAC_TX_AUTO_NEG, 0);
5273 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5274 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5277 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5280 memset(&aninfo, 0, sizeof(aninfo));
5281 aninfo.flags |= MR_AN_ENABLE;
5282 aninfo.state = ANEG_STATE_UNKNOWN;
5283 aninfo.cur_time = 0;
5285 while (++tick < 195000) {
5286 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5287 if (status == ANEG_DONE || status == ANEG_FAILED)
5293 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5294 tw32_f(MAC_MODE, tp->mac_mode);
5297 *txflags = aninfo.txconfig;
5298 *rxflags = aninfo.flags;
5300 if (status == ANEG_DONE &&
5301 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5302 MR_LP_ADV_FULL_DUPLEX)))
5308 static void tg3_init_bcm8002(struct tg3 *tp)
5310 u32 mac_status = tr32(MAC_STATUS);
5313 /* Reset when initializing for the first time or when we have a link. */
5314 if (tg3_flag(tp, INIT_COMPLETE) &&
5315 !(mac_status & MAC_STATUS_PCS_SYNCED))
5318 /* Set PLL lock range. */
5319 tg3_writephy(tp, 0x16, 0x8007);
5322 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5324 /* Wait for reset to complete. */
5325 /* XXX schedule_timeout() ... */
5326 for (i = 0; i < 500; i++)
5329 /* Config mode; select PMA/Ch 1 regs. */
5330 tg3_writephy(tp, 0x10, 0x8411);
5332 /* Enable auto-lock and comdet, select txclk for tx. */
5333 tg3_writephy(tp, 0x11, 0x0a10);
5335 tg3_writephy(tp, 0x18, 0x00a0);
5336 tg3_writephy(tp, 0x16, 0x41ff);
5338 /* Assert and deassert POR. */
5339 tg3_writephy(tp, 0x13, 0x0400);
5341 tg3_writephy(tp, 0x13, 0x0000);
5343 tg3_writephy(tp, 0x11, 0x0a50);
5345 tg3_writephy(tp, 0x11, 0x0a10);
5347 /* Wait for signal to stabilize */
5348 /* XXX schedule_timeout() ... */
5349 for (i = 0; i < 15000; i++)
5352 /* Deselect the channel register so we can read the PHYID
5355 tg3_writephy(tp, 0x10, 0x8011);
5358 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5361 bool current_link_up;
5362 u32 sg_dig_ctrl, sg_dig_status;
5363 u32 serdes_cfg, expected_sg_dig_ctrl;
5364 int workaround, port_a;
5367 expected_sg_dig_ctrl = 0;
5370 current_link_up = false;
5372 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5373 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5375 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5378 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5379 /* preserve bits 20-23 for voltage regulator */
5380 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5383 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5385 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5386 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5388 u32 val = serdes_cfg;
5394 tw32_f(MAC_SERDES_CFG, val);
5397 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5399 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5400 tg3_setup_flow_control(tp, 0, 0);
5401 current_link_up = true;
5406 /* Want auto-negotiation. */
5407 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5409 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5410 if (flowctrl & ADVERTISE_1000XPAUSE)
5411 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5412 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5413 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5415 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5416 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5417 tp->serdes_counter &&
5418 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5419 MAC_STATUS_RCVD_CFG)) ==
5420 MAC_STATUS_PCS_SYNCED)) {
5421 tp->serdes_counter--;
5422 current_link_up = true;
5427 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5428 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5430 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5432 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5433 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5434 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5435 MAC_STATUS_SIGNAL_DET)) {
5436 sg_dig_status = tr32(SG_DIG_STATUS);
5437 mac_status = tr32(MAC_STATUS);
5439 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5440 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5441 u32 local_adv = 0, remote_adv = 0;
5443 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5444 local_adv |= ADVERTISE_1000XPAUSE;
5445 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5446 local_adv |= ADVERTISE_1000XPSE_ASYM;
5448 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5449 remote_adv |= LPA_1000XPAUSE;
5450 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5451 remote_adv |= LPA_1000XPAUSE_ASYM;
5453 tp->link_config.rmt_adv =
5454 mii_adv_to_ethtool_adv_x(remote_adv);
5456 tg3_setup_flow_control(tp, local_adv, remote_adv);
5457 current_link_up = true;
5458 tp->serdes_counter = 0;
5459 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5460 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5461 if (tp->serdes_counter)
5462 tp->serdes_counter--;
5465 u32 val = serdes_cfg;
5472 tw32_f(MAC_SERDES_CFG, val);
5475 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5478 /* Link parallel detection - link is up */
5479 /* only if we have PCS_SYNC and not */
5480 /* receiving config code words */
5481 mac_status = tr32(MAC_STATUS);
5482 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5483 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5484 tg3_setup_flow_control(tp, 0, 0);
5485 current_link_up = true;
5487 TG3_PHYFLG_PARALLEL_DETECT;
5488 tp->serdes_counter =
5489 SERDES_PARALLEL_DET_TIMEOUT;
5491 goto restart_autoneg;
5495 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5500 return current_link_up;
5503 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5505 bool current_link_up = false;
5507 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5510 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5511 u32 txflags, rxflags;
5514 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5515 u32 local_adv = 0, remote_adv = 0;
5517 if (txflags & ANEG_CFG_PS1)
5518 local_adv |= ADVERTISE_1000XPAUSE;
5519 if (txflags & ANEG_CFG_PS2)
5520 local_adv |= ADVERTISE_1000XPSE_ASYM;
5522 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5523 remote_adv |= LPA_1000XPAUSE;
5524 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5525 remote_adv |= LPA_1000XPAUSE_ASYM;
5527 tp->link_config.rmt_adv =
5528 mii_adv_to_ethtool_adv_x(remote_adv);
5530 tg3_setup_flow_control(tp, local_adv, remote_adv);
5532 current_link_up = true;
5534 for (i = 0; i < 30; i++) {
5537 (MAC_STATUS_SYNC_CHANGED |
5538 MAC_STATUS_CFG_CHANGED));
5540 if ((tr32(MAC_STATUS) &
5541 (MAC_STATUS_SYNC_CHANGED |
5542 MAC_STATUS_CFG_CHANGED)) == 0)
5546 mac_status = tr32(MAC_STATUS);
5547 if (!current_link_up &&
5548 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5549 !(mac_status & MAC_STATUS_RCVD_CFG))
5550 current_link_up = true;
5552 tg3_setup_flow_control(tp, 0, 0);
5554 /* Forcing 1000FD link up. */
5555 current_link_up = true;
5557 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5560 tw32_f(MAC_MODE, tp->mac_mode);
5565 return current_link_up;
5568 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5571 u16 orig_active_speed;
5572 u8 orig_active_duplex;
5574 bool current_link_up;
5577 orig_pause_cfg = tp->link_config.active_flowctrl;
5578 orig_active_speed = tp->link_config.active_speed;
5579 orig_active_duplex = tp->link_config.active_duplex;
5581 if (!tg3_flag(tp, HW_AUTONEG) &&
5583 tg3_flag(tp, INIT_COMPLETE)) {
5584 mac_status = tr32(MAC_STATUS);
5585 mac_status &= (MAC_STATUS_PCS_SYNCED |
5586 MAC_STATUS_SIGNAL_DET |
5587 MAC_STATUS_CFG_CHANGED |
5588 MAC_STATUS_RCVD_CFG);
5589 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5590 MAC_STATUS_SIGNAL_DET)) {
5591 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5592 MAC_STATUS_CFG_CHANGED));
5597 tw32_f(MAC_TX_AUTO_NEG, 0);
5599 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5600 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5601 tw32_f(MAC_MODE, tp->mac_mode);
5604 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5605 tg3_init_bcm8002(tp);
5607 /* Enable link change event even when serdes polling. */
5608 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5611 current_link_up = false;
5612 tp->link_config.rmt_adv = 0;
5613 mac_status = tr32(MAC_STATUS);
5615 if (tg3_flag(tp, HW_AUTONEG))
5616 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5618 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5620 tp->napi[0].hw_status->status =
5621 (SD_STATUS_UPDATED |
5622 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5624 for (i = 0; i < 100; i++) {
5625 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5626 MAC_STATUS_CFG_CHANGED));
5628 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED |
5630 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5634 mac_status = tr32(MAC_STATUS);
5635 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5636 current_link_up = false;
5637 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5638 tp->serdes_counter == 0) {
5639 tw32_f(MAC_MODE, (tp->mac_mode |
5640 MAC_MODE_SEND_CONFIGS));
5642 tw32_f(MAC_MODE, tp->mac_mode);
5646 if (current_link_up) {
5647 tp->link_config.active_speed = SPEED_1000;
5648 tp->link_config.active_duplex = DUPLEX_FULL;
5649 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5650 LED_CTRL_LNKLED_OVERRIDE |
5651 LED_CTRL_1000MBPS_ON));
5653 tp->link_config.active_speed = SPEED_UNKNOWN;
5654 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5655 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5656 LED_CTRL_LNKLED_OVERRIDE |
5657 LED_CTRL_TRAFFIC_OVERRIDE));
5660 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5661 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5662 if (orig_pause_cfg != now_pause_cfg ||
5663 orig_active_speed != tp->link_config.active_speed ||
5664 orig_active_duplex != tp->link_config.active_duplex)
5665 tg3_link_report(tp);
5671 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5675 u16 current_speed = SPEED_UNKNOWN;
5676 u8 current_duplex = DUPLEX_UNKNOWN;
5677 bool current_link_up = false;
5678 u32 local_adv, remote_adv, sgsr;
5680 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5681 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5682 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5683 (sgsr & SERDES_TG3_SGMII_MODE)) {
5688 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5690 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5691 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5693 current_link_up = true;
5694 if (sgsr & SERDES_TG3_SPEED_1000) {
5695 current_speed = SPEED_1000;
5696 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5697 } else if (sgsr & SERDES_TG3_SPEED_100) {
5698 current_speed = SPEED_100;
5699 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5701 current_speed = SPEED_10;
5702 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5705 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5706 current_duplex = DUPLEX_FULL;
5708 current_duplex = DUPLEX_HALF;
5711 tw32_f(MAC_MODE, tp->mac_mode);
5714 tg3_clear_mac_status(tp);
5716 goto fiber_setup_done;
5719 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5720 tw32_f(MAC_MODE, tp->mac_mode);
5723 tg3_clear_mac_status(tp);
5728 tp->link_config.rmt_adv = 0;
5730 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5731 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5732 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5733 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5734 bmsr |= BMSR_LSTATUS;
5736 bmsr &= ~BMSR_LSTATUS;
5739 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5741 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5742 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5743 /* do nothing, just check for link up at the end */
5744 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5747 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5748 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5749 ADVERTISE_1000XPAUSE |
5750 ADVERTISE_1000XPSE_ASYM |
5753 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5754 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5756 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5757 tg3_writephy(tp, MII_ADVERTISE, newadv);
5758 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5759 tg3_writephy(tp, MII_BMCR, bmcr);
5761 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5762 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5763 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5770 bmcr &= ~BMCR_SPEED1000;
5771 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5773 if (tp->link_config.duplex == DUPLEX_FULL)
5774 new_bmcr |= BMCR_FULLDPLX;
5776 if (new_bmcr != bmcr) {
5777 /* BMCR_SPEED1000 is a reserved bit that needs
5778 * to be set on write.
5780 new_bmcr |= BMCR_SPEED1000;
5782 /* Force a linkdown */
5786 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5787 adv &= ~(ADVERTISE_1000XFULL |
5788 ADVERTISE_1000XHALF |
5790 tg3_writephy(tp, MII_ADVERTISE, adv);
5791 tg3_writephy(tp, MII_BMCR, bmcr |
5795 tg3_carrier_off(tp);
5797 tg3_writephy(tp, MII_BMCR, new_bmcr);
5799 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5800 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5801 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5802 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5803 bmsr |= BMSR_LSTATUS;
5805 bmsr &= ~BMSR_LSTATUS;
5807 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5811 if (bmsr & BMSR_LSTATUS) {
5812 current_speed = SPEED_1000;
5813 current_link_up = true;
5814 if (bmcr & BMCR_FULLDPLX)
5815 current_duplex = DUPLEX_FULL;
5817 current_duplex = DUPLEX_HALF;
5822 if (bmcr & BMCR_ANENABLE) {
5825 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5826 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5827 common = local_adv & remote_adv;
5828 if (common & (ADVERTISE_1000XHALF |
5829 ADVERTISE_1000XFULL)) {
5830 if (common & ADVERTISE_1000XFULL)
5831 current_duplex = DUPLEX_FULL;
5833 current_duplex = DUPLEX_HALF;
5835 tp->link_config.rmt_adv =
5836 mii_adv_to_ethtool_adv_x(remote_adv);
5837 } else if (!tg3_flag(tp, 5780_CLASS)) {
5838 /* Link is up via parallel detect */
5840 current_link_up = false;
5846 if (current_link_up && current_duplex == DUPLEX_FULL)
5847 tg3_setup_flow_control(tp, local_adv, remote_adv);
5849 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5850 if (tp->link_config.active_duplex == DUPLEX_HALF)
5851 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5853 tw32_f(MAC_MODE, tp->mac_mode);
5856 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5858 tp->link_config.active_speed = current_speed;
5859 tp->link_config.active_duplex = current_duplex;
5861 tg3_test_and_report_link_chg(tp, current_link_up);
5865 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5867 if (tp->serdes_counter) {
5868 /* Give autoneg time to complete. */
5869 tp->serdes_counter--;
5874 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5877 tg3_readphy(tp, MII_BMCR, &bmcr);
5878 if (bmcr & BMCR_ANENABLE) {
5881 /* Select shadow register 0x1f */
5882 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5883 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5885 /* Select expansion interrupt status register */
5886 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5887 MII_TG3_DSP_EXP1_INT_STAT);
5888 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5889 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5891 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5892 /* We have signal detect and not receiving
5893 * config code words, link is up by parallel
5897 bmcr &= ~BMCR_ANENABLE;
5898 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5899 tg3_writephy(tp, MII_BMCR, bmcr);
5900 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5903 } else if (tp->link_up &&
5904 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5905 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5908 /* Select expansion interrupt status register */
5909 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5910 MII_TG3_DSP_EXP1_INT_STAT);
5911 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5915 /* Config code words received, turn on autoneg. */
5916 tg3_readphy(tp, MII_BMCR, &bmcr);
5917 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5919 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5925 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5930 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5931 err = tg3_setup_fiber_phy(tp, force_reset);
5932 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5933 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5935 err = tg3_setup_copper_phy(tp, force_reset);
5937 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5940 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5941 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5943 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5948 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5949 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5950 tw32(GRC_MISC_CFG, val);
5953 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5954 (6 << TX_LENGTHS_IPG_SHIFT);
5955 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5956 tg3_asic_rev(tp) == ASIC_REV_5762)
5957 val |= tr32(MAC_TX_LENGTHS) &
5958 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5959 TX_LENGTHS_CNT_DWN_VAL_MSK);
5961 if (tp->link_config.active_speed == SPEED_1000 &&
5962 tp->link_config.active_duplex == DUPLEX_HALF)
5963 tw32(MAC_TX_LENGTHS, val |
5964 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5966 tw32(MAC_TX_LENGTHS, val |
5967 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5969 if (!tg3_flag(tp, 5705_PLUS)) {
5971 tw32(HOSTCC_STAT_COAL_TICKS,
5972 tp->coal.stats_block_coalesce_usecs);
5974 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5978 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5979 val = tr32(PCIE_PWR_MGMT_THRESH);
5981 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5984 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5985 tw32(PCIE_PWR_MGMT_THRESH, val);
5991 /* tp->lock must be held */
5992 static u64 tg3_refclk_read(struct tg3 *tp)
5994 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5995 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5998 /* tp->lock must be held */
5999 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6001 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6002 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6003 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6004 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6007 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6008 static inline void tg3_full_unlock(struct tg3 *tp);
6009 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6011 struct tg3 *tp = netdev_priv(dev);
6013 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6014 SOF_TIMESTAMPING_RX_SOFTWARE |
6015 SOF_TIMESTAMPING_SOFTWARE;
6017 if (tg3_flag(tp, PTP_CAPABLE)) {
6018 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6019 SOF_TIMESTAMPING_RX_HARDWARE |
6020 SOF_TIMESTAMPING_RAW_HARDWARE;
6024 info->phc_index = ptp_clock_index(tp->ptp_clock);
6026 info->phc_index = -1;
6028 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6030 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6031 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6032 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6033 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
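/* For reference (not part of the original source): this is the ethtool
 * get_ts_info() hook, so the capabilities filled in above are what
 * userspace sees from "ethtool -T <iface>".
 */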
6037 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6039 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6040 bool neg_adj = false;
6048 /* Frequency adjustment is performed using hardware with a 24 bit
6049 * accumulator and a programmable correction value. On each clk, the
6050 * correction value gets added to the accumulator and when it
6051 * overflows, the time counter is incremented/decremented.
6053 * So conversion from ppb to correction value is
6054 * ppb * (1 << 24) / 1000000000
6056 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6057 TG3_EAV_REF_CLK_CORRECT_MASK;
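/* Illustrative worked example (not part of the original source): for a
 * requested ppb of 1000 (+1 ppm), correction = 1000 * 2^24 / 10^9,
 * truncated to 16. The 24-bit accumulator then overflows roughly once
 * every 2^24 / 16 ~= 1048576 clocks, i.e. about one extra count per
 * million clocks, which is ~1 ppm (slightly low due to the integer
 * truncation).
 */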
6059 tg3_full_lock(tp, 0);
6062 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6063 TG3_EAV_REF_CLK_CORRECT_EN |
6064 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6066 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6068 tg3_full_unlock(tp);
6073 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6075 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077 tg3_full_lock(tp, 0);
6078 tp->ptp_adjust += delta;
6079 tg3_full_unlock(tp);
6084 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6088 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6090 tg3_full_lock(tp, 0);
6091 ns = tg3_refclk_read(tp);
6092 ns += tp->ptp_adjust;
6093 tg3_full_unlock(tp);
6095 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6096 ts->tv_nsec = remainder;
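/* Illustrative example (not part of the original source): if the
 * adjusted counter reads ns = 1234567890123456789, div_u64_rem() above
 * yields tv_sec = 1234567890 and tv_nsec = 123456789.
 */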
6101 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6102 const struct timespec *ts)
6105 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6107 ns = timespec_to_ns(ts);
6109 tg3_full_lock(tp, 0);
6110 tg3_refclk_write(tp, ns);
6112 tg3_full_unlock(tp);
6117 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6118 struct ptp_clock_request *rq, int on)
6123 static const struct ptp_clock_info tg3_ptp_caps = {
6124 .owner = THIS_MODULE,
6125 .name = "tg3 clock",
6126 .max_adj = 250000000,
6131 .adjfreq = tg3_ptp_adjfreq,
6132 .adjtime = tg3_ptp_adjtime,
6133 .gettime = tg3_ptp_gettime,
6134 .settime = tg3_ptp_settime,
6135 .enable = tg3_ptp_enable,
6138 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6139 struct skb_shared_hwtstamps *timestamp)
6141 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6142 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6146 /* tp->lock must be held */
6147 static void tg3_ptp_init(struct tg3 *tp)
6149 if (!tg3_flag(tp, PTP_CAPABLE))
6152 /* Initialize the hardware clock to the system time. */
6153 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6155 tp->ptp_info = tg3_ptp_caps;
6158 /* tp->lock must be held */
6159 static void tg3_ptp_resume(struct tg3 *tp)
6161 if (!tg3_flag(tp, PTP_CAPABLE))
6164 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6168 static void tg3_ptp_fini(struct tg3 *tp)
6170 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6173 ptp_clock_unregister(tp->ptp_clock);
6174 tp->ptp_clock = NULL;
6178 static inline int tg3_irq_sync(struct tg3 *tp)
6180 return tp->irq_sync;
6183 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6187 dst = (u32 *)((u8 *)dst + off);
6188 for (i = 0; i < len; i += sizeof(u32))
6189 *dst++ = tr32(off + i);
6192 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6194 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6195 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6196 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6197 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6198 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6199 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6200 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6201 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6202 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6203 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6204 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6205 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6206 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6207 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6208 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6209 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6210 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6211 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6212 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6214 if (tg3_flag(tp, SUPPORT_MSIX))
6215 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6217 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6218 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6219 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6220 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6221 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6222 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6223 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6224 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6226 if (!tg3_flag(tp, 5705_PLUS)) {
6227 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6228 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6229 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6232 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6233 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6234 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6235 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6238 if (tg3_flag(tp, NVRAM))
6239 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6242 static void tg3_dump_state(struct tg3 *tp)
6247 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6251 if (tg3_flag(tp, PCI_EXPRESS)) {
6252 /* Read up to but not including private PCI registers */
6253 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6254 regs[i / sizeof(u32)] = tr32(i);
6256 tg3_dump_legacy_regs(tp, regs);
6258 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6259 if (!regs[i + 0] && !regs[i + 1] &&
6260 !regs[i + 2] && !regs[i + 3])
6263 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6265 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6270 for (i = 0; i < tp->irq_cnt; i++) {
6271 struct tg3_napi *tnapi = &tp->napi[i];
6273 /* SW status block */
6275 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6277 tnapi->hw_status->status,
6278 tnapi->hw_status->status_tag,
6279 tnapi->hw_status->rx_jumbo_consumer,
6280 tnapi->hw_status->rx_consumer,
6281 tnapi->hw_status->rx_mini_consumer,
6282 tnapi->hw_status->idx[0].rx_producer,
6283 tnapi->hw_status->idx[0].tx_consumer);
6286 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6288 tnapi->last_tag, tnapi->last_irq_tag,
6289 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6291 tnapi->prodring.rx_std_prod_idx,
6292 tnapi->prodring.rx_std_cons_idx,
6293 tnapi->prodring.rx_jmb_prod_idx,
6294 tnapi->prodring.rx_jmb_cons_idx);
6298 /* This is called whenever we suspect that the system chipset is re-
6299 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6300 * is bogus tx completions. We try to recover by setting the
6301 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6304 static void tg3_tx_recover(struct tg3 *tp)
6306 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6307 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6309 netdev_warn(tp->dev,
6310 "The system may be re-ordering memory-mapped I/O "
6311 "cycles to the network device, attempting to recover. "
6312 "Please report the problem to the driver maintainer "
6313 "and include system chipset information.\n");
6315 spin_lock(&tp->lock);
6316 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6317 spin_unlock(&tp->lock);
6320 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6322 /* Tell compiler to fetch tx indices from memory. */
6324 return tnapi->tx_pending -
6325 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
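/* Worked example (illustrative, not part of the original source),
 * assuming TG3_TX_RING_SIZE is 512: with tx_pending = 511, tx_prod = 5
 * and tx_cons = 510, the in-flight count is (5 - 510) & 511 == 7, so
 * 511 - 7 == 504 descriptors remain available; the mask makes the
 * subtraction wrap correctly once the producer index has wrapped past
 * the consumer.
 */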
6328 /* Tigon3 never reports partial packet sends. So we do not
6329 * need special logic to handle SKBs that have not had all
6330 * of their frags sent yet, like SunGEM does.
6332 static void tg3_tx(struct tg3_napi *tnapi)
6334 struct tg3 *tp = tnapi->tp;
6335 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6336 u32 sw_idx = tnapi->tx_cons;
6337 struct netdev_queue *txq;
6338 int index = tnapi - tp->napi;
6339 unsigned int pkts_compl = 0, bytes_compl = 0;
6341 if (tg3_flag(tp, ENABLE_TSS))
6344 txq = netdev_get_tx_queue(tp->dev, index);
6346 while (sw_idx != hw_idx) {
6347 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6348 struct sk_buff *skb = ri->skb;
6351 if (unlikely(skb == NULL)) {
6356 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6357 struct skb_shared_hwtstamps timestamp;
6358 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6359 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6361 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6363 skb_tstamp_tx(skb, &timestamp);
6366 pci_unmap_single(tp->pdev,
6367 dma_unmap_addr(ri, mapping),
6373 while (ri->fragmented) {
6374 ri->fragmented = false;
6375 sw_idx = NEXT_TX(sw_idx);
6376 ri = &tnapi->tx_buffers[sw_idx];
6379 sw_idx = NEXT_TX(sw_idx);
6381 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6382 ri = &tnapi->tx_buffers[sw_idx];
6383 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6386 pci_unmap_page(tp->pdev,
6387 dma_unmap_addr(ri, mapping),
6388 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6391 while (ri->fragmented) {
6392 ri->fragmented = false;
6393 sw_idx = NEXT_TX(sw_idx);
6394 ri = &tnapi->tx_buffers[sw_idx];
6397 sw_idx = NEXT_TX(sw_idx);
6401 bytes_compl += skb->len;
6405 if (unlikely(tx_bug)) {
6411 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6413 tnapi->tx_cons = sw_idx;
6415 /* Need to make the tx_cons update visible to tg3_start_xmit()
6416 * before checking for netif_queue_stopped(). Without the
6417 * memory barrier, there is a small possibility that tg3_start_xmit()
6418 * will miss it and cause the queue to be stopped forever.
6422 if (unlikely(netif_tx_queue_stopped(txq) &&
6423 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6424 __netif_tx_lock(txq, smp_processor_id());
6425 if (netif_tx_queue_stopped(txq) &&
6426 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6427 netif_tx_wake_queue(txq);
6428 __netif_tx_unlock(txq);
6432 static void tg3_frag_free(bool is_frag, void *data)
6435 put_page(virt_to_head_page(data));
6440 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6442 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6448 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6449 map_sz, PCI_DMA_FROMDEVICE);
6450 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6455 /* Returns size of skb allocated or < 0 on error.
6457 * We only need to fill in the address because the other members
6458 * of the RX descriptor are invariant, see tg3_init_rings.
6460 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6461 * posting buffers we only dirty the first cache line of the RX
6462 * descriptor (containing the address). Whereas for the RX status
6463 * buffers the cpu only reads the last cacheline of the RX descriptor
6464 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6466 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6467 u32 opaque_key, u32 dest_idx_unmasked,
6468 unsigned int *frag_size)
6470 struct tg3_rx_buffer_desc *desc;
6471 struct ring_info *map;
6474 int skb_size, data_size, dest_idx;
6476 switch (opaque_key) {
6477 case RXD_OPAQUE_RING_STD:
6478 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6479 desc = &tpr->rx_std[dest_idx];
6480 map = &tpr->rx_std_buffers[dest_idx];
6481 data_size = tp->rx_pkt_map_sz;
6484 case RXD_OPAQUE_RING_JUMBO:
6485 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6486 desc = &tpr->rx_jmb[dest_idx].std;
6487 map = &tpr->rx_jmb_buffers[dest_idx];
6488 data_size = TG3_RX_JMB_MAP_SZ;
6495 /* Do not overwrite any of the map or rp information
6496 * until we are sure we can commit to a new buffer.
6498 * Callers depend upon this behavior and assume that
6499 * we leave everything unchanged if we fail.
6501 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6502 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6503 if (skb_size <= PAGE_SIZE) {
6504 data = netdev_alloc_frag(skb_size);
6505 *frag_size = skb_size;
6507 data = kmalloc(skb_size, GFP_ATOMIC);
6513 mapping = pci_map_single(tp->pdev,
6514 data + TG3_RX_OFFSET(tp),
6516 PCI_DMA_FROMDEVICE);
6517 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6518 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6523 dma_unmap_addr_set(map, mapping, mapping);
6525 desc->addr_hi = ((u64)mapping >> 32);
6526 desc->addr_lo = ((u64)mapping & 0xffffffff);
6531 /* We only need to copy over the address because the other
6532 * members of the RX descriptor are invariant. See notes above
6533 * tg3_alloc_rx_data for full details.
6535 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6536 struct tg3_rx_prodring_set *dpr,
6537 u32 opaque_key, int src_idx,
6538 u32 dest_idx_unmasked)
6540 struct tg3 *tp = tnapi->tp;
6541 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6542 struct ring_info *src_map, *dest_map;
6543 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6546 switch (opaque_key) {
6547 case RXD_OPAQUE_RING_STD:
6548 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6549 dest_desc = &dpr->rx_std[dest_idx];
6550 dest_map = &dpr->rx_std_buffers[dest_idx];
6551 src_desc = &spr->rx_std[src_idx];
6552 src_map = &spr->rx_std_buffers[src_idx];
6555 case RXD_OPAQUE_RING_JUMBO:
6556 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6557 dest_desc = &dpr->rx_jmb[dest_idx].std;
6558 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6559 src_desc = &spr->rx_jmb[src_idx].std;
6560 src_map = &spr->rx_jmb_buffers[src_idx];
6567 dest_map->data = src_map->data;
6568 dma_unmap_addr_set(dest_map, mapping,
6569 dma_unmap_addr(src_map, mapping));
6570 dest_desc->addr_hi = src_desc->addr_hi;
6571 dest_desc->addr_lo = src_desc->addr_lo;
6573 /* Ensure that the update to the skb happens after the physical
6574 * addresses have been transferred to the new BD location.
6578 src_map->data = NULL;
6581 /* The RX ring scheme is composed of multiple rings which post fresh
6582 * buffers to the chip, and one special ring the chip uses to report
6583 * status back to the host.
6585 * The special ring reports the status of received packets to the
6586 * host. The chip does not write into the original descriptor the
6587 * RX buffer was obtained from. The chip simply takes the original
6588 * descriptor as provided by the host, updates the status and length
6589 * field, then writes this into the next status ring entry.
6591 * Each ring the host uses to post buffers to the chip is described
6592 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6593 * it is first placed into the on-chip RAM. When the packet's length
6594 * is known, it walks down the TG3_BDINFO entries to select the ring.
6595 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6596 * which is within the range of the new packet's length is chosen.
6598 * The "separate ring for rx status" scheme may sound queer, but it makes
6599 * sense from a cache coherency perspective. If only the host writes
6600 * to the buffer post rings, and only the chip writes to the rx status
6601 * rings, then cache lines never move beyond shared-modified state.
6602 * If both the host and chip were to write into the same ring, cache line
6603 * eviction could occur since both entities want it in an exclusive state.
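/* Rough sketch of the host-side receive flow implemented by tg3_rx()
 * below (summary only, derived from the code that follows):
 *
 *   hw_idx = *rx_rcb_prod_idx;          how far the chip has written
 *   while (sw_idx != hw_idx) {
 *           look up the original buffer via the opaque cookie;
 *           on error: recycle the buffer back to the producer ring;
 *           otherwise: build or copy an skb and hand it to napi_gro_receive();
 *   }
 *   write the new consumer index and the refreshed producer indices
 *   back to the mailbox registers so the chip can continue.
 */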
6605 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6607 struct tg3 *tp = tnapi->tp;
6608 u32 work_mask, rx_std_posted = 0;
6609 u32 std_prod_idx, jmb_prod_idx;
6610 u32 sw_idx = tnapi->rx_rcb_ptr;
6613 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6615 hw_idx = *(tnapi->rx_rcb_prod_idx);
6617 * We need to order the read of hw_idx and the read of
6618 * the opaque cookie.
6623 std_prod_idx = tpr->rx_std_prod_idx;
6624 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6625 while (sw_idx != hw_idx && budget > 0) {
6626 struct ring_info *ri;
6627 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6629 struct sk_buff *skb;
6630 dma_addr_t dma_addr;
6631 u32 opaque_key, desc_idx, *post_ptr;
6635 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6636 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6637 if (opaque_key == RXD_OPAQUE_RING_STD) {
6638 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6639 dma_addr = dma_unmap_addr(ri, mapping);
6641 post_ptr = &std_prod_idx;
6643 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6644 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6645 dma_addr = dma_unmap_addr(ri, mapping);
6647 post_ptr = &jmb_prod_idx;
6649 goto next_pkt_nopost;
6651 work_mask |= opaque_key;
6653 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6654 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6656 tg3_recycle_rx(tnapi, tpr, opaque_key,
6657 desc_idx, *post_ptr);
6659 /* Other statistics kept track of by card. */
6664 prefetch(data + TG3_RX_OFFSET(tp));
6665 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6668 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6669 RXD_FLAG_PTPSTAT_PTPV1 ||
6670 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6671 RXD_FLAG_PTPSTAT_PTPV2) {
6672 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6673 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6676 if (len > TG3_RX_COPY_THRESH(tp)) {
6678 unsigned int frag_size;
6680 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6681 *post_ptr, &frag_size);
6685 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6686 PCI_DMA_FROMDEVICE);
6688 skb = build_skb(data, frag_size);
6690 tg3_frag_free(frag_size != 0, data);
6691 goto drop_it_no_recycle;
6693 skb_reserve(skb, TG3_RX_OFFSET(tp));
6694 /* Ensure that the update to the data happens
6695 * after the usage of the old DMA mapping.
6702 tg3_recycle_rx(tnapi, tpr, opaque_key,
6703 desc_idx, *post_ptr);
6705 skb = netdev_alloc_skb(tp->dev,
6706 len + TG3_RAW_IP_ALIGN);
6708 goto drop_it_no_recycle;
6710 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6711 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6713 data + TG3_RX_OFFSET(tp),
6715 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6720 tg3_hwclock_to_timestamp(tp, tstamp,
6721 skb_hwtstamps(skb));
6723 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6724 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6725 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6726 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6727 skb->ip_summed = CHECKSUM_UNNECESSARY;
6729 skb_checksum_none_assert(skb);
6731 skb->protocol = eth_type_trans(skb, tp->dev);
6733 if (len > (tp->dev->mtu + ETH_HLEN) &&
6734 skb->protocol != htons(ETH_P_8021Q)) {
6736 goto drop_it_no_recycle;
6739 if (desc->type_flags & RXD_FLAG_VLAN &&
6740 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6741 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6742 desc->err_vlan & RXD_VLAN_MASK);
6744 napi_gro_receive(&tnapi->napi, skb);
6752 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6753 tpr->rx_std_prod_idx = std_prod_idx &
6754 tp->rx_std_ring_mask;
6755 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6756 tpr->rx_std_prod_idx);
6757 work_mask &= ~RXD_OPAQUE_RING_STD;
6762 sw_idx &= tp->rx_ret_ring_mask;
6764 /* Refresh hw_idx to see if there is new work */
6765 if (sw_idx == hw_idx) {
6766 hw_idx = *(tnapi->rx_rcb_prod_idx);
6771 /* ACK the status ring. */
6772 tnapi->rx_rcb_ptr = sw_idx;
6773 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6775 /* Refill RX ring(s). */
6776 if (!tg3_flag(tp, ENABLE_RSS)) {
6777 /* Sync BD data before updating mailbox */
6780 if (work_mask & RXD_OPAQUE_RING_STD) {
6781 tpr->rx_std_prod_idx = std_prod_idx &
6782 tp->rx_std_ring_mask;
6783 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6784 tpr->rx_std_prod_idx);
6786 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6787 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6788 tp->rx_jmb_ring_mask;
6789 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6790 tpr->rx_jmb_prod_idx);
6793 } else if (work_mask) {
6794 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6795 * updated before the producer indices can be updated.
6799 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6800 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6802 if (tnapi != &tp->napi[1]) {
6803 tp->rx_refill = true;
6804 napi_schedule(&tp->napi[1].napi);
6811 static void tg3_poll_link(struct tg3 *tp)
6813 /* handle link change and other phy events */
6814 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6815 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6817 if (sblk->status & SD_STATUS_LINK_CHG) {
6818 sblk->status = SD_STATUS_UPDATED |
6819 (sblk->status & ~SD_STATUS_LINK_CHG);
6820 spin_lock(&tp->lock);
6821 if (tg3_flag(tp, USE_PHYLIB)) {
6823 (MAC_STATUS_SYNC_CHANGED |
6824 MAC_STATUS_CFG_CHANGED |
6825 MAC_STATUS_MI_COMPLETION |
6826 MAC_STATUS_LNKSTATE_CHANGED));
6829 tg3_setup_phy(tp, false);
6830 spin_unlock(&tp->lock);
6835 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6836 struct tg3_rx_prodring_set *dpr,
6837 struct tg3_rx_prodring_set *spr)
6839 u32 si, di, cpycnt, src_prod_idx;
6843 src_prod_idx = spr->rx_std_prod_idx;
6845 /* Make sure updates to the rx_std_buffers[] entries and the
6846 * standard producer index are seen in the correct order.
6850 if (spr->rx_std_cons_idx == src_prod_idx)
6853 if (spr->rx_std_cons_idx < src_prod_idx)
6854 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6856 cpycnt = tp->rx_std_ring_mask + 1 -
6857 spr->rx_std_cons_idx;
6859 cpycnt = min(cpycnt,
6860 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
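/* Illustrative example (not part of the original source): with a ring
 * mask of 511, rx_std_cons_idx = 500 and src_prod_idx = 10, the
 * consumer index is numerically ahead of the producer, so only the
 * 512 - 500 = 12 entries up to the end of the ring are copied in this
 * pass (the wrapped remainder is handled on a later pass), and cpycnt
 * is further clamped to the free space left in the destination ring.
 */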
6862 si = spr->rx_std_cons_idx;
6863 di = dpr->rx_std_prod_idx;
6865 for (i = di; i < di + cpycnt; i++) {
6866 if (dpr->rx_std_buffers[i].data) {
6876 /* Ensure that updates to the rx_std_buffers ring and the
6877 * shadowed hardware producer ring from tg3_recycle_skb() are
6878 * ordered correctly WRT the skb check above.
6882 memcpy(&dpr->rx_std_buffers[di],
6883 &spr->rx_std_buffers[si],
6884 cpycnt * sizeof(struct ring_info));
6886 for (i = 0; i < cpycnt; i++, di++, si++) {
6887 struct tg3_rx_buffer_desc *sbd, *dbd;
6888 sbd = &spr->rx_std[si];
6889 dbd = &dpr->rx_std[di];
6890 dbd->addr_hi = sbd->addr_hi;
6891 dbd->addr_lo = sbd->addr_lo;
6894 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6895 tp->rx_std_ring_mask;
6896 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6897 tp->rx_std_ring_mask;
6901 src_prod_idx = spr->rx_jmb_prod_idx;
6903 /* Make sure updates to the rx_jmb_buffers[] entries and
6904 * the jumbo producer index are seen in the correct order.
6908 if (spr->rx_jmb_cons_idx == src_prod_idx)
6911 if (spr->rx_jmb_cons_idx < src_prod_idx)
6912 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6914 cpycnt = tp->rx_jmb_ring_mask + 1 -
6915 spr->rx_jmb_cons_idx;
6917 cpycnt = min(cpycnt,
6918 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6920 si = spr->rx_jmb_cons_idx;
6921 di = dpr->rx_jmb_prod_idx;
6923 for (i = di; i < di + cpycnt; i++) {
6924 if (dpr->rx_jmb_buffers[i].data) {
6934 /* Ensure that updates to the rx_jmb_buffers ring and the
6935 * shadowed hardware producer ring from tg3_recycle_skb() are
6936 * ordered correctly WRT the skb check above.
6940 memcpy(&dpr->rx_jmb_buffers[di],
6941 &spr->rx_jmb_buffers[si],
6942 cpycnt * sizeof(struct ring_info));
6944 for (i = 0; i < cpycnt; i++, di++, si++) {
6945 struct tg3_rx_buffer_desc *sbd, *dbd;
6946 sbd = &spr->rx_jmb[si].std;
6947 dbd = &dpr->rx_jmb[di].std;
6948 dbd->addr_hi = sbd->addr_hi;
6949 dbd->addr_lo = sbd->addr_lo;
6952 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6953 tp->rx_jmb_ring_mask;
6954 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6955 tp->rx_jmb_ring_mask;
6961 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6963 struct tg3 *tp = tnapi->tp;
6965 /* run TX completion thread */
6966 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6968 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6972 if (!tnapi->rx_rcb_prod_idx)
6975 /* run RX thread, within the bounds set by NAPI.
6976 * All RX "locking" is done by ensuring outside
6977 * code synchronizes with tg3->napi.poll()
6979 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6980 work_done += tg3_rx(tnapi, budget - work_done);
6982 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6983 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6985 u32 std_prod_idx = dpr->rx_std_prod_idx;
6986 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6988 tp->rx_refill = false;
6989 for (i = 1; i <= tp->rxq_cnt; i++)
6990 err |= tg3_rx_prodring_xfer(tp, dpr,
6991 &tp->napi[i].prodring);
6995 if (std_prod_idx != dpr->rx_std_prod_idx)
6996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6997 dpr->rx_std_prod_idx);
6999 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7000 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7001 dpr->rx_jmb_prod_idx);
7006 tw32_f(HOSTCC_MODE, tp->coal_now);
7012 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7014 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7015 schedule_work(&tp->reset_task);
7018 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7020 cancel_work_sync(&tp->reset_task);
7021 tg3_flag_clear(tp, RESET_TASK_PENDING);
7022 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7025 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7027 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7028 struct tg3 *tp = tnapi->tp;
7030 struct tg3_hw_status *sblk = tnapi->hw_status;
7033 work_done = tg3_poll_work(tnapi, work_done, budget);
7035 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7038 if (unlikely(work_done >= budget))
7041 /* tp->last_tag is used in tg3_int_reenable() below
7042 * to tell the hw how much work has been processed,
7043 * so we must read it before checking for more work.
7045 tnapi->last_tag = sblk->status_tag;
7046 tnapi->last_irq_tag = tnapi->last_tag;
7049 /* check for RX/TX work to do */
7050 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7051 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7053 /* This test is not race-free, but it reduces the number of
7054 * interrupts by looping again.
7056 if (tnapi == &tp->napi[1] && tp->rx_refill)
7059 napi_complete(napi);
7060 /* Reenable interrupts. */
7061 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7063 /* This test here is synchronized by napi_schedule()
7064 * and napi_complete() to close the race condition.
7066 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7067 tw32(HOSTCC_MODE, tp->coalesce_mode |
7068 HOSTCC_MODE_ENABLE |
7079 /* work_done is guaranteed to be less than budget. */
7080 napi_complete(napi);
7081 tg3_reset_task_schedule(tp);
7085 static void tg3_process_error(struct tg3 *tp)
7088 bool real_error = false;
7090 if (tg3_flag(tp, ERROR_PROCESSED))
7093 /* Check Flow Attention register */
7094 val = tr32(HOSTCC_FLOW_ATTN);
7095 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7096 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7100 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7101 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7105 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7106 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7115 tg3_flag_set(tp, ERROR_PROCESSED);
7116 tg3_reset_task_schedule(tp);
7119 static int tg3_poll(struct napi_struct *napi, int budget)
7121 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7122 struct tg3 *tp = tnapi->tp;
7124 struct tg3_hw_status *sblk = tnapi->hw_status;
7127 if (sblk->status & SD_STATUS_ERROR)
7128 tg3_process_error(tp);
7132 work_done = tg3_poll_work(tnapi, work_done, budget);
7134 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7137 if (unlikely(work_done >= budget))
7140 if (tg3_flag(tp, TAGGED_STATUS)) {
7141 /* tp->last_tag is used in tg3_int_reenable() below
7142 * to tell the hw how much work has been processed,
7143 * so we must read it before checking for more work.
7145 tnapi->last_tag = sblk->status_tag;
7146 tnapi->last_irq_tag = tnapi->last_tag;
7149 sblk->status &= ~SD_STATUS_UPDATED;
7151 if (likely(!tg3_has_work(tnapi))) {
7152 napi_complete(napi);
7153 tg3_int_reenable(tnapi);
7161 /* work_done is guaranteed to be less than budget. */
7162 napi_complete(napi);
7163 tg3_reset_task_schedule(tp);
7167 static void tg3_napi_disable(struct tg3 *tp)
7171 for (i = tp->irq_cnt - 1; i >= 0; i--)
7172 napi_disable(&tp->napi[i].napi);
7175 static void tg3_napi_enable(struct tg3 *tp)
7179 for (i = 0; i < tp->irq_cnt; i++)
7180 napi_enable(&tp->napi[i].napi);
7183 static void tg3_napi_init(struct tg3 *tp)
7187 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7188 for (i = 1; i < tp->irq_cnt; i++)
7189 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7192 static void tg3_napi_fini(struct tg3 *tp)
7196 for (i = 0; i < tp->irq_cnt; i++)
7197 netif_napi_del(&tp->napi[i].napi);
7200 static inline void tg3_netif_stop(struct tg3 *tp)
7202 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7203 tg3_napi_disable(tp);
7204 netif_carrier_off(tp->dev);
7205 netif_tx_disable(tp->dev);
7208 /* tp->lock must be held */
7209 static inline void tg3_netif_start(struct tg3 *tp)
7213 /* NOTE: unconditional netif_tx_wake_all_queues is only
7214 * appropriate so long as all callers are assured to
7215 * have free tx slots (such as after tg3_init_hw)
7217 netif_tx_wake_all_queues(tp->dev);
7220 netif_carrier_on(tp->dev);
7222 tg3_napi_enable(tp);
7223 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7224 tg3_enable_ints(tp);
7227 static void tg3_irq_quiesce(struct tg3 *tp)
7231 BUG_ON(tp->irq_sync);
7236 for (i = 0; i < tp->irq_cnt; i++)
7237 synchronize_irq(tp->napi[i].irq_vec);
7240 /* Fully shut down all tg3 driver activity elsewhere in the system.
7241 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7242 * as well. Most of the time this is not necessary, except when
7243 * shutting down the device.
7245 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7247 spin_lock_bh(&tp->lock);
7249 tg3_irq_quiesce(tp);
7252 static inline void tg3_full_unlock(struct tg3 *tp)
7254 spin_unlock_bh(&tp->lock);
7257 /* One-shot MSI handler - Chip automatically disables interrupt
7258 * after sending MSI so driver doesn't have to do it.
7260 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7262 struct tg3_napi *tnapi = dev_id;
7263 struct tg3 *tp = tnapi->tp;
7265 prefetch(tnapi->hw_status);
7267 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7269 if (likely(!tg3_irq_sync(tp)))
7270 napi_schedule(&tnapi->napi);
7275 /* MSI ISR - No need to check for interrupt sharing and no need to
7276 * flush status block and interrupt mailbox. PCI ordering rules
7277 * guarantee that MSI will arrive after the status block.
7279 static irqreturn_t tg3_msi(int irq, void *dev_id)
7281 struct tg3_napi *tnapi = dev_id;
7282 struct tg3 *tp = tnapi->tp;
7284 prefetch(tnapi->hw_status);
7286 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7288 * Writing any value to intr-mbox-0 clears PCI INTA# and
7289 * chip-internal interrupt pending events.
7290 * Writing non-zero to intr-mbox-0 additionally tells the
7291 * NIC to stop sending us irqs, engaging "in-intr-handler"
7294 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7295 if (likely(!tg3_irq_sync(tp)))
7296 napi_schedule(&tnapi->napi);
7298 return IRQ_RETVAL(1);
7301 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7303 struct tg3_napi *tnapi = dev_id;
7304 struct tg3 *tp = tnapi->tp;
7305 struct tg3_hw_status *sblk = tnapi->hw_status;
7306 unsigned int handled = 1;
7308 /* In INTx mode, it is possible for the interrupt to arrive at
7309 * the CPU before the status block, posted prior to the interrupt, is visible.
7310 * Reading the PCI State register will confirm whether the
7311 * interrupt is ours and will flush the status block.
7313 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7314 if (tg3_flag(tp, CHIP_RESETTING) ||
7315 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7322 * Writing any value to intr-mbox-0 clears PCI INTA# and
7323 * chip-internal interrupt pending events.
7324 * Writing non-zero to intr-mbox-0 additionally tells the
7325 * NIC to stop sending us irqs, engaging "in-intr-handler"
7328 * Flush the mailbox to de-assert the IRQ immediately to prevent
7329 * spurious interrupts. The flush impacts performance but
7330 * excessive spurious interrupts can be worse in some cases.
7332 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7333 if (tg3_irq_sync(tp))
7335 sblk->status &= ~SD_STATUS_UPDATED;
7336 if (likely(tg3_has_work(tnapi))) {
7337 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7338 napi_schedule(&tnapi->napi);
7340 /* No work, shared interrupt perhaps? re-enable
7341 * interrupts, and flush that PCI write
7343 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7347 return IRQ_RETVAL(handled);
7350 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7352 struct tg3_napi *tnapi = dev_id;
7353 struct tg3 *tp = tnapi->tp;
7354 struct tg3_hw_status *sblk = tnapi->hw_status;
7355 unsigned int handled = 1;
7357 /* In INTx mode, it is possible for the interrupt to arrive at
7358 * the CPU before the status block, posted prior to the interrupt, is visible.
7359 * Reading the PCI State register will confirm whether the
7360 * interrupt is ours and will flush the status block.
7362 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7363 if (tg3_flag(tp, CHIP_RESETTING) ||
7364 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7371 * writing any value to intr-mbox-0 clears PCI INTA# and
7372 * chip-internal interrupt pending events.
7373 * writing non-zero to intr-mbox-0 additionally tells the
7374 * NIC to stop sending us irqs, engaging "in-intr-handler"
7377 * Flush the mailbox to de-assert the IRQ immediately to prevent
7378 * spurious interrupts. The flush impacts performance but
7379 * excessive spurious interrupts can be worse in some cases.
7381 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7384 * In a shared interrupt configuration, sometimes other devices'
7385 * interrupts will scream. We record the current status tag here
7386 * so that the above check can report that the screaming interrupts
7387 * are unhandled. Eventually they will be silenced.
7389 tnapi->last_irq_tag = sblk->status_tag;
7391 if (tg3_irq_sync(tp))
7394 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7396 napi_schedule(&tnapi->napi);
7399 return IRQ_RETVAL(handled);
7402 /* ISR for interrupt test */
7403 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7405 struct tg3_napi *tnapi = dev_id;
7406 struct tg3 *tp = tnapi->tp;
7407 struct tg3_hw_status *sblk = tnapi->hw_status;
7409 if ((sblk->status & SD_STATUS_UPDATED) ||
7410 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7411 tg3_disable_ints(tp);
7412 return IRQ_RETVAL(1);
7414 return IRQ_RETVAL(0);
7417 #ifdef CONFIG_NET_POLL_CONTROLLER
7418 static void tg3_poll_controller(struct net_device *dev)
7421 struct tg3 *tp = netdev_priv(dev);
7423 if (tg3_irq_sync(tp))
7426 for (i = 0; i < tp->irq_cnt; i++)
7427 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7431 static void tg3_tx_timeout(struct net_device *dev)
7433 struct tg3 *tp = netdev_priv(dev);
7435 if (netif_msg_tx_err(tp)) {
7436 netdev_err(dev, "transmit timed out, resetting\n");
7440 tg3_reset_task_schedule(tp);
7443 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7444 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7446 u32 base = (u32) mapping & 0xffffffff;
7448 return (base > 0xffffdcc0) && (base + len + 8 < base);
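
/* Illustrative sketch (not part of the driver): a hypothetical mapping that
 * trips the test above.  With base = 0xfffff000 and len = 0x2000, the 32-bit
 * sum base + len + 8 wraps to 0x1008, which is smaller than base, so the
 * buffer straddles a 4GB boundary and must be bounced.
 */
static inline int tg3_4g_overflow_example(void)
{
	u32 base = 0xfffff000;	/* hypothetical low 32 bits of a DMA mapping */
	u32 len = 0x2000;	/* hypothetical fragment length */

	return (base > 0xffffdcc0) && (base + len + 8 < base);	/* true */
}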
7451 /* Test for DMA addresses > 40-bit */
7452 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7455 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7456 if (tg3_flag(tp, 40BIT_DMA_BUG))
7457 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7464 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7465 dma_addr_t mapping, u32 len, u32 flags,
7468 txbd->addr_hi = ((u64) mapping >> 32);
7469 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7470 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7471 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
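
/* Illustrative sketch (not part of the driver): how tg3_tx_set_bd() above
 * splits a 64-bit DMA address across the two descriptor address words.
 * The mapping value is hypothetical.
 */
static inline void tg3_tx_addr_split_example(void)
{
	u64 mapping = 0x123456789ULL;
	u32 addr_hi = (u32) (mapping >> 32);		/* 0x00000001 */
	u32 addr_lo = (u32) (mapping & 0xffffffff);	/* 0x23456789 */

	(void) addr_hi;
	(void) addr_lo;
}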
7474 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7475 dma_addr_t map, u32 len, u32 flags,
7478 struct tg3 *tp = tnapi->tp;
7481 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7484 if (tg3_4g_overflow_test(map, len))
7487 if (tg3_40bit_overflow_test(tp, map, len))
7490 if (tp->dma_limit) {
7491 u32 prvidx = *entry;
7492 u32 tmp_flag = flags & ~TXD_FLAG_END;
7493 while (len > tp->dma_limit && *budget) {
7494 u32 frag_len = tp->dma_limit;
7495 len -= tp->dma_limit;
7497 /* Avoid the 8-byte DMA problem */
7499 len += tp->dma_limit / 2;
7500 frag_len = tp->dma_limit / 2;
7503 tnapi->tx_buffers[*entry].fragmented = true;
7505 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7506 frag_len, tmp_flag, mss, vlan);
7509 *entry = NEXT_TX(*entry);
7516 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7517 len, flags, mss, vlan);
7519 *entry = NEXT_TX(*entry);
7522 tnapi->tx_buffers[prvidx].fragmented = false;
7526 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7527 len, flags, mss, vlan);
7528 *entry = NEXT_TX(*entry);
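
/* Illustrative sketch (not part of the driver), assuming the elided test
 * above splits the last full chunk whenever the remaining tail would be
 * 8 bytes or less: with a hypothetical dma_limit of 4096 and len of 4100,
 * the loop emits BDs of 2048 and 2052 bytes instead of 4096 and 4, so no
 * descriptor shorter than 8 bytes is ever produced.
 */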
7534 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7537 struct sk_buff *skb;
7538 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7543 pci_unmap_single(tnapi->tp->pdev,
7544 dma_unmap_addr(txb, mapping),
7548 while (txb->fragmented) {
7549 txb->fragmented = false;
7550 entry = NEXT_TX(entry);
7551 txb = &tnapi->tx_buffers[entry];
7554 for (i = 0; i <= last; i++) {
7555 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7557 entry = NEXT_TX(entry);
7558 txb = &tnapi->tx_buffers[entry];
7560 pci_unmap_page(tnapi->tp->pdev,
7561 dma_unmap_addr(txb, mapping),
7562 skb_frag_size(frag), PCI_DMA_TODEVICE);
7564 while (txb->fragmented) {
7565 txb->fragmented = false;
7566 entry = NEXT_TX(entry);
7567 txb = &tnapi->tx_buffers[entry];
7572 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7573 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7574 struct sk_buff **pskb,
7575 u32 *entry, u32 *budget,
7576 u32 base_flags, u32 mss, u32 vlan)
7578 struct tg3 *tp = tnapi->tp;
7579 struct sk_buff *new_skb, *skb = *pskb;
7580 dma_addr_t new_addr = 0;
7583 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7584 new_skb = skb_copy(skb, GFP_ATOMIC);
7586 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7588 new_skb = skb_copy_expand(skb,
7589 skb_headroom(skb) + more_headroom,
7590 skb_tailroom(skb), GFP_ATOMIC);
7596 /* New SKB is guaranteed to be linear. */
7597 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7599 /* Make sure the mapping succeeded */
7600 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7601 dev_kfree_skb(new_skb);
7604 u32 save_entry = *entry;
7606 base_flags |= TXD_FLAG_END;
7608 tnapi->tx_buffers[*entry].skb = new_skb;
7609 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7612 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7613 new_skb->len, base_flags,
7615 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7616 dev_kfree_skb(new_skb);
7627 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7629 /* Use GSO to work around a rare TSO bug that may be triggered when the
7630 * TSO header is greater than 80 bytes.
7632 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7634 struct sk_buff *segs, *nskb;
7635 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7637 /* Estimate the number of fragments in the worst case */
7638 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7639 netif_stop_queue(tp->dev);
7641 /* netif_tx_stop_queue() must be done before checking
7642 * tx index in tg3_tx_avail() below, because in
7643 * tg3_tx(), we update tx index before checking for
7644 * netif_tx_queue_stopped().
7647 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7648 return NETDEV_TX_BUSY;
7650 netif_wake_queue(tp->dev);
7653 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7655 goto tg3_tso_bug_end;
7661 tg3_start_xmit(nskb, tp->dev);
7667 return NETDEV_TX_OK;
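
/* Illustrative sketch (not part of the driver): the reservation above is a
 * worst-case guess of three descriptors per resulting segment.  For a
 * hypothetical skb with gso_segs == 16, frag_cnt_est is 48, and the queue is
 * stopped unless at least that many tx descriptors are free before the skb
 * is segmented and each segment is resubmitted through tg3_start_xmit().
 */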
7670 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7671 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7673 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7675 struct tg3 *tp = netdev_priv(dev);
7676 u32 len, entry, base_flags, mss, vlan = 0;
7678 int i = -1, would_hit_hwbug;
7680 struct tg3_napi *tnapi;
7681 struct netdev_queue *txq;
7684 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7685 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7686 if (tg3_flag(tp, ENABLE_TSS))
7689 budget = tg3_tx_avail(tnapi);
7691 /* We are running in BH disabled context with netif_tx_lock
7692 * and TX reclaim runs via tp->napi.poll inside of a software
7693 * interrupt. Furthermore, IRQ processing runs lockless so we have
7694 * no IRQ context deadlocks to worry about either. Rejoice!
7696 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7697 if (!netif_tx_queue_stopped(txq)) {
7698 netif_tx_stop_queue(txq);
7700 /* This is a hard error, log it. */
7702 "BUG! Tx Ring full when queue awake!\n");
7704 return NETDEV_TX_BUSY;
7707 entry = tnapi->tx_prod;
7709 if (skb->ip_summed == CHECKSUM_PARTIAL)
7710 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7712 mss = skb_shinfo(skb)->gso_size;
7715 u32 tcp_opt_len, hdr_len;
7717 if (skb_header_cloned(skb) &&
7718 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7722 tcp_opt_len = tcp_optlen(skb);
7724 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7726 if (!skb_is_gso_v6(skb)) {
7728 iph->tot_len = htons(mss + hdr_len);
7731 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7732 tg3_flag(tp, TSO_BUG))
7733 return tg3_tso_bug(tp, skb);
7735 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7736 TXD_FLAG_CPU_POST_DMA);
7738 if (tg3_flag(tp, HW_TSO_1) ||
7739 tg3_flag(tp, HW_TSO_2) ||
7740 tg3_flag(tp, HW_TSO_3)) {
7741 tcp_hdr(skb)->check = 0;
7742 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7744 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7749 if (tg3_flag(tp, HW_TSO_3)) {
7750 mss |= (hdr_len & 0xc) << 12;
7752 base_flags |= 0x00000010;
7753 base_flags |= (hdr_len & 0x3e0) << 5;
7754 } else if (tg3_flag(tp, HW_TSO_2))
7755 mss |= hdr_len << 9;
7756 else if (tg3_flag(tp, HW_TSO_1) ||
7757 tg3_asic_rev(tp) == ASIC_REV_5705) {
7758 if (tcp_opt_len || iph->ihl > 5) {
7761 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7762 mss |= (tsflags << 11);
7765 if (tcp_opt_len || iph->ihl > 5) {
7768 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7769 base_flags |= tsflags << 12;
7774 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7775 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7776 base_flags |= TXD_FLAG_JMB_PKT;
7778 if (vlan_tx_tag_present(skb)) {
7779 base_flags |= TXD_FLAG_VLAN;
7780 vlan = vlan_tx_tag_get(skb);
7783 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7784 tg3_flag(tp, TX_TSTAMP_EN)) {
7785 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7786 base_flags |= TXD_FLAG_HWTSTAMP;
7789 len = skb_headlen(skb);
7791 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7792 if (pci_dma_mapping_error(tp->pdev, mapping))
7796 tnapi->tx_buffers[entry].skb = skb;
7797 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7799 would_hit_hwbug = 0;
7801 if (tg3_flag(tp, 5701_DMA_BUG))
7802 would_hit_hwbug = 1;
7804 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7805 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7807 would_hit_hwbug = 1;
7808 } else if (skb_shinfo(skb)->nr_frags > 0) {
7811 if (!tg3_flag(tp, HW_TSO_1) &&
7812 !tg3_flag(tp, HW_TSO_2) &&
7813 !tg3_flag(tp, HW_TSO_3))
7816 /* Now loop through additional data
7817 * fragments, and queue them.
7819 last = skb_shinfo(skb)->nr_frags - 1;
7820 for (i = 0; i <= last; i++) {
7821 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7823 len = skb_frag_size(frag);
7824 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7825 len, DMA_TO_DEVICE);
7827 tnapi->tx_buffers[entry].skb = NULL;
7828 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7830 if (dma_mapping_error(&tp->pdev->dev, mapping))
7834 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7836 ((i == last) ? TXD_FLAG_END : 0),
7838 would_hit_hwbug = 1;
7844 if (would_hit_hwbug) {
7845 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7847 /* If the workaround fails due to memory/mapping
7848 * failure, silently drop this packet.
7850 entry = tnapi->tx_prod;
7851 budget = tg3_tx_avail(tnapi);
7852 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7853 base_flags, mss, vlan))
7857 skb_tx_timestamp(skb);
7858 netdev_tx_sent_queue(txq, skb->len);
7860 /* Sync BD data before updating mailbox */
7863 /* Packets are ready, update Tx producer idx local and on card. */
7864 tw32_tx_mbox(tnapi->prodmbox, entry);
7866 tnapi->tx_prod = entry;
7867 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7868 netif_tx_stop_queue(txq);
7870 /* netif_tx_stop_queue() must be done before checking
7871 * tx index in tg3_tx_avail() below, because in
7872 * tg3_tx(), we update tx index before checking for
7873 * netif_tx_queue_stopped().
7876 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7877 netif_tx_wake_queue(txq);
7881 return NETDEV_TX_OK;
7884 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7885 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7890 return NETDEV_TX_OK;
7893 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7896 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7897 MAC_MODE_PORT_MODE_MASK);
7899 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7901 if (!tg3_flag(tp, 5705_PLUS))
7902 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7904 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7905 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7907 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7909 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7911 if (tg3_flag(tp, 5705_PLUS) ||
7912 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7913 tg3_asic_rev(tp) == ASIC_REV_5700)
7914 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7917 tw32(MAC_MODE, tp->mac_mode);
7921 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7923 u32 val, bmcr, mac_mode, ptest = 0;
7925 tg3_phy_toggle_apd(tp, false);
7926 tg3_phy_toggle_automdix(tp, false);
7928 if (extlpbk && tg3_phy_set_extloopbk(tp))
7931 bmcr = BMCR_FULLDPLX;
7936 bmcr |= BMCR_SPEED100;
7940 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7942 bmcr |= BMCR_SPEED100;
7945 bmcr |= BMCR_SPEED1000;
7950 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7951 tg3_readphy(tp, MII_CTRL1000, &val);
7952 val |= CTL1000_AS_MASTER |
7953 CTL1000_ENABLE_MASTER;
7954 tg3_writephy(tp, MII_CTRL1000, val);
7956 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7957 MII_TG3_FET_PTEST_TRIM_2;
7958 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7961 bmcr |= BMCR_LOOPBACK;
7963 tg3_writephy(tp, MII_BMCR, bmcr);
7965 /* The write needs to be flushed for the FETs */
7966 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7967 tg3_readphy(tp, MII_BMCR, &bmcr);
7971 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7972 tg3_asic_rev(tp) == ASIC_REV_5785) {
7973 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7974 MII_TG3_FET_PTEST_FRC_TX_LINK |
7975 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7977 /* The write needs to be flushed for the AC131 */
7978 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7981 /* Reset to prevent losing 1st rx packet intermittently */
7982 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7983 tg3_flag(tp, 5780_CLASS)) {
7984 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7986 tw32_f(MAC_RX_MODE, tp->rx_mode);
7989 mac_mode = tp->mac_mode &
7990 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7991 if (speed == SPEED_1000)
7992 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7994 mac_mode |= MAC_MODE_PORT_MODE_MII;
7996 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7997 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7999 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8000 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8001 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8002 mac_mode |= MAC_MODE_LINK_POLARITY;
8004 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8005 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8008 tw32(MAC_MODE, mac_mode);
8014 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8016 struct tg3 *tp = netdev_priv(dev);
8018 if (features & NETIF_F_LOOPBACK) {
8019 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8022 spin_lock_bh(&tp->lock);
8023 tg3_mac_loopback(tp, true);
8024 netif_carrier_on(tp->dev);
8025 spin_unlock_bh(&tp->lock);
8026 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8028 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8031 spin_lock_bh(&tp->lock);
8032 tg3_mac_loopback(tp, false);
8033 /* Force link status check */
8034 tg3_setup_phy(tp, true);
8035 spin_unlock_bh(&tp->lock);
8036 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8040 static netdev_features_t tg3_fix_features(struct net_device *dev,
8041 netdev_features_t features)
8043 struct tg3 *tp = netdev_priv(dev);
8045 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8046 features &= ~NETIF_F_ALL_TSO;
8051 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8053 netdev_features_t changed = dev->features ^ features;
8055 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8056 tg3_set_loopback(dev, features);
8061 static void tg3_rx_prodring_free(struct tg3 *tp,
8062 struct tg3_rx_prodring_set *tpr)
8066 if (tpr != &tp->napi[0].prodring) {
8067 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8068 i = (i + 1) & tp->rx_std_ring_mask)
8069 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8072 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8073 for (i = tpr->rx_jmb_cons_idx;
8074 i != tpr->rx_jmb_prod_idx;
8075 i = (i + 1) & tp->rx_jmb_ring_mask) {
8076 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8084 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8085 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8088 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8089 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8090 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8095 /* Initialize rx rings for packet processing.
8097 * The chip has been shut down and the driver detached from
8098 * the networking stack, so no interrupts or new tx packets will
8099 * end up in the driver. tp->{tx,}lock are held and thus
8102 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8103 struct tg3_rx_prodring_set *tpr)
8105 u32 i, rx_pkt_dma_sz;
8107 tpr->rx_std_cons_idx = 0;
8108 tpr->rx_std_prod_idx = 0;
8109 tpr->rx_jmb_cons_idx = 0;
8110 tpr->rx_jmb_prod_idx = 0;
8112 if (tpr != &tp->napi[0].prodring) {
8113 memset(&tpr->rx_std_buffers[0], 0,
8114 TG3_RX_STD_BUFF_RING_SIZE(tp));
8115 if (tpr->rx_jmb_buffers)
8116 memset(&tpr->rx_jmb_buffers[0], 0,
8117 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8121 /* Zero out all descriptors. */
8122 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8124 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8125 if (tg3_flag(tp, 5780_CLASS) &&
8126 tp->dev->mtu > ETH_DATA_LEN)
8127 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8128 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8130 /* Initialize invariants of the rings; we only set this
8131 * stuff once. This works because the card does not
8132 * write into the rx buffer posting rings.
8134 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8135 struct tg3_rx_buffer_desc *rxd;
8137 rxd = &tpr->rx_std[i];
8138 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8139 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8140 rxd->opaque = (RXD_OPAQUE_RING_STD |
8141 (i << RXD_OPAQUE_INDEX_SHIFT));
8144 /* Now allocate fresh SKBs for each rx ring. */
8145 for (i = 0; i < tp->rx_pending; i++) {
8146 unsigned int frag_size;
8148 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8150 netdev_warn(tp->dev,
8151 "Using a smaller RX standard ring. Only "
8152 "%d out of %d buffers were allocated "
8153 "successfully\n", i, tp->rx_pending);
8161 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8164 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8166 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8169 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8170 struct tg3_rx_buffer_desc *rxd;
8172 rxd = &tpr->rx_jmb[i].std;
8173 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8174 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8176 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8177 (i << RXD_OPAQUE_INDEX_SHIFT));
8180 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8181 unsigned int frag_size;
8183 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8185 netdev_warn(tp->dev,
8186 "Using a smaller RX jumbo ring. Only %d "
8187 "out of %d buffers were allocated "
8188 "successfully\n", i, tp->rx_jumbo_pending);
8191 tp->rx_jumbo_pending = i;
8200 tg3_rx_prodring_free(tp, tpr);
8204 static void tg3_rx_prodring_fini(struct tg3 *tp,
8205 struct tg3_rx_prodring_set *tpr)
8207 kfree(tpr->rx_std_buffers);
8208 tpr->rx_std_buffers = NULL;
8209 kfree(tpr->rx_jmb_buffers);
8210 tpr->rx_jmb_buffers = NULL;
8212 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8213 tpr->rx_std, tpr->rx_std_mapping);
8217 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8218 tpr->rx_jmb, tpr->rx_jmb_mapping);
8223 static int tg3_rx_prodring_init(struct tg3 *tp,
8224 struct tg3_rx_prodring_set *tpr)
8226 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8228 if (!tpr->rx_std_buffers)
8231 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8232 TG3_RX_STD_RING_BYTES(tp),
8233 &tpr->rx_std_mapping,
8238 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8239 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8241 if (!tpr->rx_jmb_buffers)
8244 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8245 TG3_RX_JMB_RING_BYTES(tp),
8246 &tpr->rx_jmb_mapping,
8255 tg3_rx_prodring_fini(tp, tpr);
8259 /* Free up pending packets in all rx/tx rings.
8261 * The chip has been shut down and the driver detached from
8262 * the networking stack, so no interrupts or new tx packets will
8263 * end up in the driver. tp->{tx,}lock is not held and we are not
8264 * in an interrupt context and thus may sleep.
8266 static void tg3_free_rings(struct tg3 *tp)
8270 for (j = 0; j < tp->irq_cnt; j++) {
8271 struct tg3_napi *tnapi = &tp->napi[j];
8273 tg3_rx_prodring_free(tp, &tnapi->prodring);
8275 if (!tnapi->tx_buffers)
8278 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8279 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8284 tg3_tx_skb_unmap(tnapi, i,
8285 skb_shinfo(skb)->nr_frags - 1);
8287 dev_kfree_skb_any(skb);
8289 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8293 /* Initialize tx/rx rings for packet processing.
8295 * The chip has been shut down and the driver detached from
8296 * the networking stack, so no interrupts or new tx packets will
8297 * end up in the driver. tp->{tx,}lock are held and thus
8300 static int tg3_init_rings(struct tg3 *tp)
8304 /* Free up all the SKBs. */
8307 for (i = 0; i < tp->irq_cnt; i++) {
8308 struct tg3_napi *tnapi = &tp->napi[i];
8310 tnapi->last_tag = 0;
8311 tnapi->last_irq_tag = 0;
8312 tnapi->hw_status->status = 0;
8313 tnapi->hw_status->status_tag = 0;
8314 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8319 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8321 tnapi->rx_rcb_ptr = 0;
8323 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8325 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8334 static void tg3_mem_tx_release(struct tg3 *tp)
8338 for (i = 0; i < tp->irq_max; i++) {
8339 struct tg3_napi *tnapi = &tp->napi[i];
8341 if (tnapi->tx_ring) {
8342 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8343 tnapi->tx_ring, tnapi->tx_desc_mapping);
8344 tnapi->tx_ring = NULL;
8347 kfree(tnapi->tx_buffers);
8348 tnapi->tx_buffers = NULL;
8352 static int tg3_mem_tx_acquire(struct tg3 *tp)
8355 struct tg3_napi *tnapi = &tp->napi[0];
8357 /* If multivector TSS is enabled, vector 0 does not handle
8358 * tx interrupts. Don't allocate any resources for it.
8360 if (tg3_flag(tp, ENABLE_TSS))
8363 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8364 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8365 TG3_TX_RING_SIZE, GFP_KERNEL);
8366 if (!tnapi->tx_buffers)
8369 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8371 &tnapi->tx_desc_mapping,
8373 if (!tnapi->tx_ring)
8380 tg3_mem_tx_release(tp);
8384 static void tg3_mem_rx_release(struct tg3 *tp)
8388 for (i = 0; i < tp->irq_max; i++) {
8389 struct tg3_napi *tnapi = &tp->napi[i];
8391 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8396 dma_free_coherent(&tp->pdev->dev,
8397 TG3_RX_RCB_RING_BYTES(tp),
8399 tnapi->rx_rcb_mapping);
8400 tnapi->rx_rcb = NULL;
8404 static int tg3_mem_rx_acquire(struct tg3 *tp)
8406 unsigned int i, limit;
8408 limit = tp->rxq_cnt;
8410 /* If RSS is enabled, we need a (dummy) producer ring
8411 * set on vector zero. This is the true hw prodring.
8413 if (tg3_flag(tp, ENABLE_RSS))
8416 for (i = 0; i < limit; i++) {
8417 struct tg3_napi *tnapi = &tp->napi[i];
8419 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8422 /* If multivector RSS is enabled, vector 0
8423 * does not handle rx or tx interrupts.
8424 * Don't allocate any resources for it.
8426 if (!i && tg3_flag(tp, ENABLE_RSS))
8429 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8430 TG3_RX_RCB_RING_BYTES(tp),
8431 &tnapi->rx_rcb_mapping,
8432 GFP_KERNEL | __GFP_ZERO);
8440 tg3_mem_rx_release(tp);
8445 * Must not be invoked with interrupt sources disabled and
8446 * the hardware shut down.
8448 static void tg3_free_consistent(struct tg3 *tp)
8452 for (i = 0; i < tp->irq_cnt; i++) {
8453 struct tg3_napi *tnapi = &tp->napi[i];
8455 if (tnapi->hw_status) {
8456 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8458 tnapi->status_mapping);
8459 tnapi->hw_status = NULL;
8463 tg3_mem_rx_release(tp);
8464 tg3_mem_tx_release(tp);
8467 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8468 tp->hw_stats, tp->stats_mapping);
8469 tp->hw_stats = NULL;
8474 * Must not be invoked with interrupt sources disabled and
8475 * the hardware shut down. Can sleep.
8477 static int tg3_alloc_consistent(struct tg3 *tp)
8481 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8482 sizeof(struct tg3_hw_stats),
8484 GFP_KERNEL | __GFP_ZERO);
8488 for (i = 0; i < tp->irq_cnt; i++) {
8489 struct tg3_napi *tnapi = &tp->napi[i];
8490 struct tg3_hw_status *sblk;
8492 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8494 &tnapi->status_mapping,
8495 GFP_KERNEL | __GFP_ZERO);
8496 if (!tnapi->hw_status)
8499 sblk = tnapi->hw_status;
8501 if (tg3_flag(tp, ENABLE_RSS)) {
8502 u16 *prodptr = NULL;
8505 * When RSS is enabled, the status block format changes
8506 * slightly. The "rx_jumbo_consumer", "reserved",
8507 * and "rx_mini_consumer" members get mapped to the
8508 * other three rx return ring producer indexes.
8512 prodptr = &sblk->idx[0].rx_producer;
8515 prodptr = &sblk->rx_jumbo_consumer;
8518 prodptr = &sblk->reserved;
8521 prodptr = &sblk->rx_mini_consumer;
8524 tnapi->rx_rcb_prod_idx = prodptr;
8526 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8530 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8536 tg3_free_consistent(tp);
8540 #define MAX_WAIT_CNT 1000
8542 /* To stop a block, clear the enable bit and poll till it
8543 * clears. tp->lock is held.
8545 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8550 if (tg3_flag(tp, 5705_PLUS)) {
8557 /* We can't enable/disable these bits of the
8558 * 5705/5750, just say success.
8571 for (i = 0; i < MAX_WAIT_CNT; i++) {
8574 if ((val & enable_bit) == 0)
8578 if (i == MAX_WAIT_CNT && !silent) {
8579 dev_err(&tp->pdev->dev,
8580 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8588 /* tp->lock is held. */
8589 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8593 tg3_disable_ints(tp);
8595 tp->rx_mode &= ~RX_MODE_ENABLE;
8596 tw32_f(MAC_RX_MODE, tp->rx_mode);
8599 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8600 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8601 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8602 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8603 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8604 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8606 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8607 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8608 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8609 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8610 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8611 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8612 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8614 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8615 tw32_f(MAC_MODE, tp->mac_mode);
8618 tp->tx_mode &= ~TX_MODE_ENABLE;
8619 tw32_f(MAC_TX_MODE, tp->tx_mode);
8621 for (i = 0; i < MAX_WAIT_CNT; i++) {
8623 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8626 if (i >= MAX_WAIT_CNT) {
8627 dev_err(&tp->pdev->dev,
8628 "%s timed out, TX_MODE_ENABLE will not clear "
8629 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8633 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8634 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8635 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8637 tw32(FTQ_RESET, 0xffffffff);
8638 tw32(FTQ_RESET, 0x00000000);
8640 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8641 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8643 for (i = 0; i < tp->irq_cnt; i++) {
8644 struct tg3_napi *tnapi = &tp->napi[i];
8645 if (tnapi->hw_status)
8646 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8652 /* Save PCI command register before chip reset */
8653 static void tg3_save_pci_state(struct tg3 *tp)
8655 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8658 /* Restore PCI state after chip reset */
8659 static void tg3_restore_pci_state(struct tg3 *tp)
8663 /* Re-enable indirect register accesses. */
8664 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8665 tp->misc_host_ctrl);
8667 /* Set MAX PCI retry to zero. */
8668 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8669 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8670 tg3_flag(tp, PCIX_MODE))
8671 val |= PCISTATE_RETRY_SAME_DMA;
8672 /* Allow reads and writes to the APE register and memory space. */
8673 if (tg3_flag(tp, ENABLE_APE))
8674 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8675 PCISTATE_ALLOW_APE_SHMEM_WR |
8676 PCISTATE_ALLOW_APE_PSPACE_WR;
8677 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8679 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8681 if (!tg3_flag(tp, PCI_EXPRESS)) {
8682 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8683 tp->pci_cacheline_sz);
8684 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8688 /* Make sure PCI-X relaxed ordering bit is clear. */
8689 if (tg3_flag(tp, PCIX_MODE)) {
8692 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8694 pcix_cmd &= ~PCI_X_CMD_ERO;
8695 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8699 if (tg3_flag(tp, 5780_CLASS)) {
8701 /* Chip reset on 5780 will reset MSI enable bit,
8702 * so we need to restore it.
8704 if (tg3_flag(tp, USING_MSI)) {
8707 pci_read_config_word(tp->pdev,
8708 tp->msi_cap + PCI_MSI_FLAGS,
8710 pci_write_config_word(tp->pdev,
8711 tp->msi_cap + PCI_MSI_FLAGS,
8712 ctrl | PCI_MSI_FLAGS_ENABLE);
8713 val = tr32(MSGINT_MODE);
8714 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8719 /* tp->lock is held. */
8720 static int tg3_chip_reset(struct tg3 *tp)
8723 void (*write_op)(struct tg3 *, u32, u32);
8728 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8730 /* No matching tg3_nvram_unlock() after this because
8731 * chip reset below will undo the nvram lock.
8733 tp->nvram_lock_cnt = 0;
8735 /* GRC_MISC_CFG core clock reset will clear the memory
8736 * enable bit in PCI register 4 and the MSI enable bit
8737 * on some chips, so we save relevant registers here.
8739 tg3_save_pci_state(tp);
8741 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8742 tg3_flag(tp, 5755_PLUS))
8743 tw32(GRC_FASTBOOT_PC, 0);
8746 * We must avoid the readl() that normally takes place.
8747 * It locks machines, causes machine checks, and other
8748 * fun things. So, temporarily disable the 5701
8749 * hardware workaround, while we do the reset.
8751 write_op = tp->write32;
8752 if (write_op == tg3_write_flush_reg32)
8753 tp->write32 = tg3_write32;
8755 /* Prevent the irq handler from reading or writing PCI registers
8756 * during chip reset when the memory enable bit in the PCI command
8757 * register may be cleared. The chip does not generate interrupt
8758 * at this time, but the irq handler may still be called due to irq
8759 * sharing or irqpoll.
8761 tg3_flag_set(tp, CHIP_RESETTING);
8762 for (i = 0; i < tp->irq_cnt; i++) {
8763 struct tg3_napi *tnapi = &tp->napi[i];
8764 if (tnapi->hw_status) {
8765 tnapi->hw_status->status = 0;
8766 tnapi->hw_status->status_tag = 0;
8768 tnapi->last_tag = 0;
8769 tnapi->last_irq_tag = 0;
8773 for (i = 0; i < tp->irq_cnt; i++)
8774 synchronize_irq(tp->napi[i].irq_vec);
8776 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8777 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8778 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8782 val = GRC_MISC_CFG_CORECLK_RESET;
8784 if (tg3_flag(tp, PCI_EXPRESS)) {
8785 /* Force PCIe 1.0a mode */
8786 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8787 !tg3_flag(tp, 57765_PLUS) &&
8788 tr32(TG3_PCIE_PHY_TSTCTL) ==
8789 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8790 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8792 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8793 tw32(GRC_MISC_CFG, (1 << 29));
8798 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8799 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8800 tw32(GRC_VCPU_EXT_CTRL,
8801 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8804 /* Manage gphy power for all CPMU-absent PCIe devices. */
8805 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8806 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8808 tw32(GRC_MISC_CFG, val);
8810 /* restore 5701 hardware bug workaround write method */
8811 tp->write32 = write_op;
8813 /* Unfortunately, we have to delay before the PCI read back.
8814 * Some 575X chips will not even respond to a PCI cfg access
8815 * when the reset command is given to the chip.
8817 * How do these hardware designers expect things to work
8818 * properly if the PCI write is posted for a long period
8819 * of time? It is always necessary to have some method by
8820 * which a register read back can occur to push the write
8821 * out which does the reset.
8823 * For most tg3 variants the trick below was working.
8828 /* Flush PCI posted writes. The normal MMIO registers
8829 * are inaccessible at this time so this is the only
8830 * way to make this reliable (actually, this is no longer
8831 * the case, see above). I tried to use indirect
8832 * register read/write but this upset some 5701 variants.
8834 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8838 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8841 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8845 /* Wait for link training to complete. */
8846 for (j = 0; j < 5000; j++)
8849 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8850 pci_write_config_dword(tp->pdev, 0xc4,
8851 cfg_val | (1 << 15));
8854 /* Clear the "no snoop" and "relaxed ordering" bits. */
8855 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8857 * Older PCIe devices only support the 128 byte
8858 * MPS setting. Enforce the restriction.
8860 if (!tg3_flag(tp, CPMU_PRESENT))
8861 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8862 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8864 /* Clear error status */
8865 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8866 PCI_EXP_DEVSTA_CED |
8867 PCI_EXP_DEVSTA_NFED |
8868 PCI_EXP_DEVSTA_FED |
8869 PCI_EXP_DEVSTA_URD);
8872 tg3_restore_pci_state(tp);
8874 tg3_flag_clear(tp, CHIP_RESETTING);
8875 tg3_flag_clear(tp, ERROR_PROCESSED);
8878 if (tg3_flag(tp, 5780_CLASS))
8879 val = tr32(MEMARB_MODE);
8880 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8882 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8884 tw32(0x5000, 0x400);
8887 if (tg3_flag(tp, IS_SSB_CORE)) {
8889 * BCM4785: In order to avoid repercussions from using
8890 * potentially defective internal ROM, stop the Rx RISC CPU,
8891 * which is not required for normal operation.
8894 tg3_halt_cpu(tp, RX_CPU_BASE);
8897 tw32(GRC_MODE, tp->grc_mode);
8899 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8902 tw32(0xc4, val | (1 << 15));
8905 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8906 tg3_asic_rev(tp) == ASIC_REV_5705) {
8907 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8908 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8909 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8910 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8913 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8914 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8916 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8917 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8922 tw32_f(MAC_MODE, val);
8925 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8927 err = tg3_poll_fw(tp);
8933 if (tg3_flag(tp, PCI_EXPRESS) &&
8934 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8935 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8936 !tg3_flag(tp, 57765_PLUS)) {
8939 tw32(0x7c00, val | (1 << 25));
8942 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8943 val = tr32(TG3_CPMU_CLCK_ORIDE);
8944 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8947 /* Reprobe ASF enable state. */
8948 tg3_flag_clear(tp, ENABLE_ASF);
8949 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8950 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8952 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8953 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8954 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8957 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8958 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8959 tg3_flag_set(tp, ENABLE_ASF);
8960 tp->last_event_jiffies = jiffies;
8961 if (tg3_flag(tp, 5750_PLUS))
8962 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8964 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8965 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8966 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8967 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8968 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8975 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8976 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8978 /* tp->lock is held. */
8979 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8985 tg3_write_sig_pre_reset(tp, kind);
8987 tg3_abort_hw(tp, silent);
8988 err = tg3_chip_reset(tp);
8990 __tg3_set_mac_addr(tp, false);
8992 tg3_write_sig_legacy(tp, kind);
8993 tg3_write_sig_post_reset(tp, kind);
8996 /* Save the stats across chip resets... */
8997 tg3_get_nstats(tp, &tp->net_stats_prev);
8998 tg3_get_estats(tp, &tp->estats_prev);
9000 /* And make sure the next sample is new data */
9001 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9010 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9012 struct tg3 *tp = netdev_priv(dev);
9013 struct sockaddr *addr = p;
9015 bool skip_mac_1 = false;
9017 if (!is_valid_ether_addr(addr->sa_data))
9018 return -EADDRNOTAVAIL;
9020 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9022 if (!netif_running(dev))
9025 if (tg3_flag(tp, ENABLE_ASF)) {
9026 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9028 addr0_high = tr32(MAC_ADDR_0_HIGH);
9029 addr0_low = tr32(MAC_ADDR_0_LOW);
9030 addr1_high = tr32(MAC_ADDR_1_HIGH);
9031 addr1_low = tr32(MAC_ADDR_1_LOW);
9033 /* Skip MAC addr 1 if ASF is using it. */
9034 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9035 !(addr1_high == 0 && addr1_low == 0))
9038 spin_lock_bh(&tp->lock);
9039 __tg3_set_mac_addr(tp, skip_mac_1);
9040 spin_unlock_bh(&tp->lock);
9045 /* tp->lock is held. */
9046 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9047 dma_addr_t mapping, u32 maxlen_flags,
9051 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9052 ((u64) mapping >> 32));
9054 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9055 ((u64) mapping & 0xffffffff));
9057 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9060 if (!tg3_flag(tp, 5705_PLUS))
9062 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9067 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9071 if (!tg3_flag(tp, ENABLE_TSS)) {
9072 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9073 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9074 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9076 tw32(HOSTCC_TXCOL_TICKS, 0);
9077 tw32(HOSTCC_TXMAX_FRAMES, 0);
9078 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9080 for (; i < tp->txq_cnt; i++) {
9083 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9084 tw32(reg, ec->tx_coalesce_usecs);
9085 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9086 tw32(reg, ec->tx_max_coalesced_frames);
9087 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9088 tw32(reg, ec->tx_max_coalesced_frames_irq);
9092 for (; i < tp->irq_max - 1; i++) {
9093 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9094 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9095 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9099 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9102 u32 limit = tp->rxq_cnt;
9104 if (!tg3_flag(tp, ENABLE_RSS)) {
9105 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9106 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9107 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9110 tw32(HOSTCC_RXCOL_TICKS, 0);
9111 tw32(HOSTCC_RXMAX_FRAMES, 0);
9112 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9115 for (; i < limit; i++) {
9118 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9119 tw32(reg, ec->rx_coalesce_usecs);
9120 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9121 tw32(reg, ec->rx_max_coalesced_frames);
9122 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9123 tw32(reg, ec->rx_max_coalesced_frames_irq);
9126 for (; i < tp->irq_max - 1; i++) {
9127 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9128 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9129 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
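
/* Illustrative sketch (not part of the driver): the per-vector coalescing
 * registers above are laid out with a 0x18-byte stride, so for loop index
 * i == 1 the rx ticks register is HOSTCC_RXCOL_TICKS_VEC1 + 0x18, for
 * i == 2 it is + 0x30, and so on.
 */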
9133 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9135 tg3_coal_tx_init(tp, ec);
9136 tg3_coal_rx_init(tp, ec);
9138 if (!tg3_flag(tp, 5705_PLUS)) {
9139 u32 val = ec->stats_block_coalesce_usecs;
9141 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9142 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9147 tw32(HOSTCC_STAT_COAL_TICKS, val);
9151 /* tp->lock is held. */
9152 static void tg3_rings_reset(struct tg3 *tp)
9155 u32 stblk, txrcb, rxrcb, limit;
9156 struct tg3_napi *tnapi = &tp->napi[0];
9158 /* Disable all transmit rings but the first. */
9159 if (!tg3_flag(tp, 5705_PLUS))
9160 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9161 else if (tg3_flag(tp, 5717_PLUS))
9162 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9163 else if (tg3_flag(tp, 57765_CLASS) ||
9164 tg3_asic_rev(tp) == ASIC_REV_5762)
9165 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9167 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9169 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9170 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9171 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9172 BDINFO_FLAGS_DISABLED);
9175 /* Disable all receive return rings but the first. */
9176 if (tg3_flag(tp, 5717_PLUS))
9177 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9178 else if (!tg3_flag(tp, 5705_PLUS))
9179 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9180 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9181 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9182 tg3_flag(tp, 57765_CLASS))
9183 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9185 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9187 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9188 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9189 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9190 BDINFO_FLAGS_DISABLED);
9192 /* Disable interrupts */
9193 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9194 tp->napi[0].chk_msi_cnt = 0;
9195 tp->napi[0].last_rx_cons = 0;
9196 tp->napi[0].last_tx_cons = 0;
9198 /* Zero mailbox registers. */
9199 if (tg3_flag(tp, SUPPORT_MSIX)) {
9200 for (i = 1; i < tp->irq_max; i++) {
9201 tp->napi[i].tx_prod = 0;
9202 tp->napi[i].tx_cons = 0;
9203 if (tg3_flag(tp, ENABLE_TSS))
9204 tw32_mailbox(tp->napi[i].prodmbox, 0);
9205 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9206 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9207 tp->napi[i].chk_msi_cnt = 0;
9208 tp->napi[i].last_rx_cons = 0;
9209 tp->napi[i].last_tx_cons = 0;
9211 if (!tg3_flag(tp, ENABLE_TSS))
9212 tw32_mailbox(tp->napi[0].prodmbox, 0);
9214 tp->napi[0].tx_prod = 0;
9215 tp->napi[0].tx_cons = 0;
9216 tw32_mailbox(tp->napi[0].prodmbox, 0);
9217 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9220 /* Make sure the NIC-based send BD rings are disabled. */
9221 if (!tg3_flag(tp, 5705_PLUS)) {
9222 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9223 for (i = 0; i < 16; i++)
9224 tw32_tx_mbox(mbox + i * 8, 0);
9227 txrcb = NIC_SRAM_SEND_RCB;
9228 rxrcb = NIC_SRAM_RCV_RET_RCB;
9230 /* Clear status block in ram. */
9231 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9233 /* Set status block DMA address */
9234 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9235 ((u64) tnapi->status_mapping >> 32));
9236 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9237 ((u64) tnapi->status_mapping & 0xffffffff));
9239 if (tnapi->tx_ring) {
9240 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9241 (TG3_TX_RING_SIZE <<
9242 BDINFO_FLAGS_MAXLEN_SHIFT),
9243 NIC_SRAM_TX_BUFFER_DESC);
9244 txrcb += TG3_BDINFO_SIZE;
9247 if (tnapi->rx_rcb) {
9248 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9249 (tp->rx_ret_ring_mask + 1) <<
9250 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9251 rxrcb += TG3_BDINFO_SIZE;
9254 stblk = HOSTCC_STATBLCK_RING1;
9256 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9257 u64 mapping = (u64)tnapi->status_mapping;
9258 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9259 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9261 /* Clear status block in ram. */
9262 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9264 if (tnapi->tx_ring) {
9265 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9266 (TG3_TX_RING_SIZE <<
9267 BDINFO_FLAGS_MAXLEN_SHIFT),
9268 NIC_SRAM_TX_BUFFER_DESC);
9269 txrcb += TG3_BDINFO_SIZE;
9272 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9273 ((tp->rx_ret_ring_mask + 1) <<
9274 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9277 rxrcb += TG3_BDINFO_SIZE;
9281 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9283 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9285 if (!tg3_flag(tp, 5750_PLUS) ||
9286 tg3_flag(tp, 5780_CLASS) ||
9287 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9288 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9289 tg3_flag(tp, 57765_PLUS))
9290 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9291 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9292 tg3_asic_rev(tp) == ASIC_REV_5787)
9293 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9295 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9297 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9298 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9300 val = min(nic_rep_thresh, host_rep_thresh);
9301 tw32(RCVBDI_STD_THRESH, val);
9303 if (tg3_flag(tp, 57765_PLUS))
9304 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9306 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9309 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9311 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9313 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9314 tw32(RCVBDI_JUMBO_THRESH, val);
9316 if (tg3_flag(tp, 57765_PLUS))
9317 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9320 static inline u32 calc_crc(unsigned char *buf, int len)
9328 for (j = 0; j < len; j++) {
9331 for (k = 0; k < 8; k++) {
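
/* Illustrative sketch (not part of the driver), assuming the elided body of
 * calc_crc() above computes a standard bit-reflected CRC-32 over the buffer
 * (polynomial 0xedb88320); a minimal standalone equivalent under that
 * assumption:
 */
static inline u32 calc_crc_sketch(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++)
			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
	}

	return ~reg;
}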
9344 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9346 /* accept or reject all multicast frames */
9347 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9348 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9349 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9350 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9353 static void __tg3_set_rx_mode(struct net_device *dev)
9355 struct tg3 *tp = netdev_priv(dev);
9358 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9359 RX_MODE_KEEP_VLAN_TAG);
9361 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9362 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9365 if (!tg3_flag(tp, ENABLE_ASF))
9366 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9369 if (dev->flags & IFF_PROMISC) {
9370 /* Promiscuous mode. */
9371 rx_mode |= RX_MODE_PROMISC;
9372 } else if (dev->flags & IFF_ALLMULTI) {
9373 /* Accept all multicast. */
9374 tg3_set_multi(tp, 1);
9375 } else if (netdev_mc_empty(dev)) {
9376 /* Reject all multicast. */
9377 tg3_set_multi(tp, 0);
9379 /* Accept one or more multicast(s). */
9380 struct netdev_hw_addr *ha;
9381 u32 mc_filter[4] = { 0, };
9386 netdev_for_each_mc_addr(ha, dev) {
9387 crc = calc_crc(ha->addr, ETH_ALEN);
9389 regidx = (bit & 0x60) >> 5;
9391 mc_filter[regidx] |= (1 << bit);
9394 tw32(MAC_HASH_REG_0, mc_filter[0]);
9395 tw32(MAC_HASH_REG_1, mc_filter[1]);
9396 tw32(MAC_HASH_REG_2, mc_filter[2]);
9397 tw32(MAC_HASH_REG_3, mc_filter[3]);
9400 if (rx_mode != tp->rx_mode) {
9401 tp->rx_mode = rx_mode;
9402 tw32_f(MAC_RX_MODE, rx_mode);
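
/* Illustrative sketch (not part of the driver), assuming the elided lines
 * above derive a 7-bit hash value from the CRC and mask it to five bits
 * before setting the filter bit: a hash of 0x47 selects register
 * (0x47 & 0x60) >> 5 == 2, i.e. MAC_HASH_REG_2, and bit 0x47 & 0x1f == 7
 * within it, matching the four 32-bit hash registers written above.
 */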
9407 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9411 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9412 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
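
/* Illustrative sketch (not part of the driver): ethtool_rxfh_indir_default()
 * spreads table entries round-robin across the rx queues, so with a
 * hypothetical qcnt of 4 the table above reads 0, 1, 2, 3, 0, 1, ...
 */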
9415 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9419 if (!tg3_flag(tp, SUPPORT_MSIX))
9422 if (tp->rxq_cnt == 1) {
9423 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9427 /* Validate table against current IRQ count */
9428 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9429 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9433 if (i != TG3_RSS_INDIR_TBL_SIZE)
9434 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9437 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9440 u32 reg = MAC_RSS_INDIR_TBL_0;
9442 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9443 u32 val = tp->rss_ind_tbl[i];
9445 for (; i % 8; i++) {
9447 val |= tp->rss_ind_tbl[i];
9454 /* tp->lock is held. */
9455 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9457 u32 val, rdmac_mode;
9459 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9461 tg3_disable_ints(tp);
9465 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9467 if (tg3_flag(tp, INIT_COMPLETE))
9468 tg3_abort_hw(tp, 1);
9470 /* Enable MAC control of LPI */
9471 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9472 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9473 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9474 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9475 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9477 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9479 tw32_f(TG3_CPMU_EEE_CTRL,
9480 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9482 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9483 TG3_CPMU_EEEMD_LPI_IN_TX |
9484 TG3_CPMU_EEEMD_LPI_IN_RX |
9485 TG3_CPMU_EEEMD_EEE_ENABLE;
9487 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9488 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9490 if (tg3_flag(tp, ENABLE_APE))
9491 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9493 tw32_f(TG3_CPMU_EEE_MODE, val);
9495 tw32_f(TG3_CPMU_EEE_DBTMR1,
9496 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9497 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9499 tw32_f(TG3_CPMU_EEE_DBTMR2,
9500 TG3_CPMU_DBTMR2_APE_TX_2047US |
9501 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9504 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9505 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9506 tg3_phy_pull_config(tp);
9507 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9513 err = tg3_chip_reset(tp);
9517 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9519 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9520 val = tr32(TG3_CPMU_CTRL);
9521 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9522 tw32(TG3_CPMU_CTRL, val);
9524 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9525 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9526 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9527 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9529 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9530 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9531 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9532 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9534 val = tr32(TG3_CPMU_HST_ACC);
9535 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9536 val |= CPMU_HST_ACC_MACCLK_6_25;
9537 tw32(TG3_CPMU_HST_ACC, val);
9540 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9541 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9542 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9543 PCIE_PWR_MGMT_L1_THRESH_4MS;
9544 tw32(PCIE_PWR_MGMT_THRESH, val);
9546 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9547 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9549 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9551 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9552 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9555 if (tg3_flag(tp, L1PLLPD_EN)) {
9556 u32 grc_mode = tr32(GRC_MODE);
9558 /* Access the lower 1K of PL PCIE block registers. */
9559 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9560 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9562 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9563 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9564 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9566 tw32(GRC_MODE, grc_mode);
9569 if (tg3_flag(tp, 57765_CLASS)) {
9570 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9571 u32 grc_mode = tr32(GRC_MODE);
9573 /* Access the lower 1K of PL PCIE block registers. */
9574 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9575 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9577 val = tr32(TG3_PCIE_TLDLPL_PORT +
9578 TG3_PCIE_PL_LO_PHYCTL5);
9579 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9580 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9582 tw32(GRC_MODE, grc_mode);
9585 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9588 /* Fix transmit hangs */
9589 val = tr32(TG3_CPMU_PADRNG_CTL);
9590 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9591 tw32(TG3_CPMU_PADRNG_CTL, val);
9593 grc_mode = tr32(GRC_MODE);
9595 /* Access the lower 1K of DL PCIE block registers. */
9596 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9597 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9599 val = tr32(TG3_PCIE_TLDLPL_PORT +
9600 TG3_PCIE_DL_LO_FTSMAX);
9601 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9602 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9603 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9605 tw32(GRC_MODE, grc_mode);
9608 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9609 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9610 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9611 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9614 /* This works around an issue with Athlon chipsets on
9615 * B3 tigon3 silicon. This bit has no effect on any
9616 * other revision. But do not set this on PCI Express
9617 * chips and don't even touch the clocks if the CPMU is present.
9619 if (!tg3_flag(tp, CPMU_PRESENT)) {
9620 if (!tg3_flag(tp, PCI_EXPRESS))
9621 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9622 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9625 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9626 tg3_flag(tp, PCIX_MODE)) {
9627 val = tr32(TG3PCI_PCISTATE);
9628 val |= PCISTATE_RETRY_SAME_DMA;
9629 tw32(TG3PCI_PCISTATE, val);
9632 if (tg3_flag(tp, ENABLE_APE)) {
9633 /* Allow reads and writes to the
9634 * APE register and memory space.
9636 val = tr32(TG3PCI_PCISTATE);
9637 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9638 PCISTATE_ALLOW_APE_SHMEM_WR |
9639 PCISTATE_ALLOW_APE_PSPACE_WR;
9640 tw32(TG3PCI_PCISTATE, val);
9643 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9644 /* Enable some hw fixes. */
9645 val = tr32(TG3PCI_MSI_DATA);
9646 val |= (1 << 26) | (1 << 28) | (1 << 29);
9647 tw32(TG3PCI_MSI_DATA, val);
9650 /* Descriptor ring init may make accesses to the
9651 * NIC SRAM area to setup the TX descriptors, so we
9652 * can only do this after the hardware has been
9653 * successfully reset.
9655 err = tg3_init_rings(tp);
if (err)
return err;
9659 if (tg3_flag(tp, 57765_PLUS)) {
9660 val = tr32(TG3PCI_DMA_RW_CTRL) &
9661 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9662 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9663 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9664 if (!tg3_flag(tp, 57765_CLASS) &&
9665 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9666 tg3_asic_rev(tp) != ASIC_REV_5762)
9667 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9668 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9669 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9670 tg3_asic_rev(tp) != ASIC_REV_5761) {
9671 /* This value is determined during the probe time DMA
9672 * engine test, tg3_test_dma.
9674 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9677 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9678 GRC_MODE_4X_NIC_SEND_RINGS |
9679 GRC_MODE_NO_TX_PHDR_CSUM |
9680 GRC_MODE_NO_RX_PHDR_CSUM);
9681 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9683 /* Pseudo-header checksum is done by hardware logic and not
9684 * the offload processors, so make the chip do the pseudo-
9685 * header checksums on receive. For transmit it is more
9686 * convenient to do the pseudo-header checksum in software
9687 * as Linux does that on transmit for us in all cases.
9689 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
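/* For reference (general Linux TX checksum-offload convention, not specific
 * to this driver): a CHECKSUM_PARTIAL skb arrives with the pseudo-header sum
 * already seeded into the transport checksum field by the stack, so the
 * device only finishes the one's-complement sum from csum_start onward. On
 * receive the chip folds the pseudo-header in itself, because
 * GRC_MODE_NO_RX_PHDR_CSUM is left cleared above.
 */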
9691 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9693 tw32(TG3_RX_PTP_CTL,
9694 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9696 if (tg3_flag(tp, PTP_CAPABLE))
9697 val |= GRC_MODE_TIME_SYNC_ENABLE;
9699 tw32(GRC_MODE, tp->grc_mode | val);
9701 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9702 val = tr32(GRC_MISC_CFG);
val &= ~0xff;
9704 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9705 tw32(GRC_MISC_CFG, val);
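/* A prescaler value of 65 presumably divides the 66 MHz core clock by
 * (65 + 1), giving a 1 MHz (1 usec resolution) tick for the GRC timer.
 */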
9707 /* Initialize MBUF/DESC pool. */
9708 if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
9710 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9711 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9712 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9713 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
else
9715 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9716 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9717 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9718 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9721 fw_len = tp->fw_len;
9722 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9723 tw32(BUFMGR_MB_POOL_ADDR,
9724 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9725 tw32(BUFMGR_MB_POOL_SIZE,
9726 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
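/* The rounding above aligns fw_len up to the next 128-byte (0x80) boundary
 * before that much of the 5705 MBUF pool is reserved for firmware; e.g. an
 * fw_len of 0x1234 becomes 0x1280.
 */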
9729 if (tp->dev->mtu <= ETH_DATA_LEN) {
9730 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9731 tp->bufmgr_config.mbuf_read_dma_low_water);
9732 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9733 tp->bufmgr_config.mbuf_mac_rx_low_water);
9734 tw32(BUFMGR_MB_HIGH_WATER,
9735 tp->bufmgr_config.mbuf_high_water);
} else {
9737 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9738 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9739 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9740 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9741 tw32(BUFMGR_MB_HIGH_WATER,
9742 tp->bufmgr_config.mbuf_high_water_jumbo);
9744 tw32(BUFMGR_DMA_LOW_WATER,
9745 tp->bufmgr_config.dma_low_water);
9746 tw32(BUFMGR_DMA_HIGH_WATER,
9747 tp->bufmgr_config.dma_high_water);
9749 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9750 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9751 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9752 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9753 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9754 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9755 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9756 tw32(BUFMGR_MODE, val);
9757 for (i = 0; i < 2000; i++) {
9758 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
break;
udelay(10);
}
if (i >= 2000) {
9763 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
return -ENODEV;
}
9767 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9768 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9770 tg3_setup_rxbd_thresholds(tp);
9772 /* Initialize TG3_BDINFO's at:
9773 * RCVDBDI_STD_BD: standard eth size rx ring
9774 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9775 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9778 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9779 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9780 * ring attribute flags
9781 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9783 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9784 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9786 * The size of each ring is fixed in the firmware, but the location is
* configurable.
*/
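/* Illustrative example: the standard ring below ends up with
 * TG3_BDINFO_MAXLEN_FLAGS = (TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT),
 * i.e. the RX buffer size in the upper half-word and the attribute flags in
 * the lower half-word.
 */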
9789 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9790 ((u64) tpr->rx_std_mapping >> 32));
9791 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9792 ((u64) tpr->rx_std_mapping & 0xffffffff));
9793 if (!tg3_flag(tp, 5717_PLUS))
9794 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9795 NIC_SRAM_RX_BUFFER_DESC);
9797 /* Disable the mini ring */
9798 if (!tg3_flag(tp, 5705_PLUS))
9799 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9800 BDINFO_FLAGS_DISABLED);
9802 /* Program the jumbo buffer descriptor ring control
9803 * blocks on those devices that have them.
9805 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9806 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9808 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9809 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9810 ((u64) tpr->rx_jmb_mapping >> 32));
9811 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9812 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9813 val = TG3_RX_JMB_RING_SIZE(tp) <<
9814 BDINFO_FLAGS_MAXLEN_SHIFT;
9815 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9816 val | BDINFO_FLAGS_USE_EXT_RECV);
9817 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9818 tg3_flag(tp, 57765_CLASS) ||
9819 tg3_asic_rev(tp) == ASIC_REV_5762)
9820 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9821 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9823 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9824 BDINFO_FLAGS_DISABLED);
9827 if (tg3_flag(tp, 57765_PLUS)) {
9828 val = TG3_RX_STD_RING_SIZE(tp);
9829 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9830 val |= (TG3_RX_STD_DMA_SZ << 2);
9832 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9834 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9836 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9838 tpr->rx_std_prod_idx = tp->rx_pending;
9839 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9841 tpr->rx_jmb_prod_idx =
9842 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9843 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9845 tg3_rings_reset(tp);
9847 /* Initialize MAC address and backoff seed. */
9848 __tg3_set_mac_addr(tp, false);
9850 /* MTU + ethernet header + FCS + optional VLAN tag */
9851 tw32(MAC_RX_MTU_SIZE,
9852 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
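/* For the default 1500-byte MTU this is 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */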
9854 /* The slot time is changed by tg3_setup_phy if we
9855 * run at gigabit with half duplex.
9857 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9858 (6 << TX_LENGTHS_IPG_SHIFT) |
9859 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9861 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9862 tg3_asic_rev(tp) == ASIC_REV_5762)
9863 val |= tr32(MAC_TX_LENGTHS) &
9864 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9865 TX_LENGTHS_CNT_DWN_VAL_MSK);
9867 tw32(MAC_TX_LENGTHS, val);
9869 /* Receive rules. */
9870 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9871 tw32(RCVLPC_CONFIG, 0x0181);
9873 /* Calculate RDMAC_MODE setting early, we need it to determine
9874 * the RCVLPC_STATE_ENABLE mask.
9876 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9877 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9878 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9879 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9880 RDMAC_MODE_LNGREAD_ENAB);
9882 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9883 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9885 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9886 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9887 tg3_asic_rev(tp) == ASIC_REV_57780)
9888 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9889 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9890 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9892 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9893 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9894 if (tg3_flag(tp, TSO_CAPABLE) &&
9895 tg3_asic_rev(tp) == ASIC_REV_5705) {
9896 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9897 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9898 !tg3_flag(tp, IS_5788)) {
9899 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9903 if (tg3_flag(tp, PCI_EXPRESS))
9904 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9906 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9908 if (tp->dev->mtu <= ETH_DATA_LEN) {
9909 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9910 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9914 if (tg3_flag(tp, HW_TSO_1) ||
9915 tg3_flag(tp, HW_TSO_2) ||
9916 tg3_flag(tp, HW_TSO_3))
9917 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9919 if (tg3_flag(tp, 57765_PLUS) ||
9920 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9921 tg3_asic_rev(tp) == ASIC_REV_57780)
9922 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9924 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9925 tg3_asic_rev(tp) == ASIC_REV_5762)
9926 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9928 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9929 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9930 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9931 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9932 tg3_flag(tp, 57765_PLUS)) {
9935 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9936 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
else
9938 tgtreg = TG3_RDMA_RSRVCTRL_REG;

val = tr32(tgtreg);
9941 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9942 tg3_asic_rev(tp) == ASIC_REV_5762) {
9943 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9944 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9945 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9946 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9947 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9948 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9950 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9953 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9954 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9955 tg3_asic_rev(tp) == ASIC_REV_5762) {
9958 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9959 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
else
9961 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

val = tr32(tgtreg);
tw32(tgtreg, val |
9965 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9966 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9969 /* Receive/send statistics. */
9970 if (tg3_flag(tp, 5750_PLUS)) {
9971 val = tr32(RCVLPC_STATS_ENABLE);
9972 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9973 tw32(RCVLPC_STATS_ENABLE, val);
9974 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9975 tg3_flag(tp, TSO_CAPABLE)) {
9976 val = tr32(RCVLPC_STATS_ENABLE);
9977 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9978 tw32(RCVLPC_STATS_ENABLE, val);
9980 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9982 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9983 tw32(SNDDATAI_STATSENAB, 0xffffff);
9984 tw32(SNDDATAI_STATSCTRL,
9985 (SNDDATAI_SCTRL_ENABLE |
9986 SNDDATAI_SCTRL_FASTUPD));
9988 /* Setup host coalescing engine. */
9989 tw32(HOSTCC_MODE, 0);
9990 for (i = 0; i < 2000; i++) {
9991 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
break;
udelay(10);
}
9996 __tg3_set_coalesce(tp, &tp->coal);
9998 if (!tg3_flag(tp, 5705_PLUS)) {
9999 /* Status/statistics block address. See tg3_timer,
10000 * the tg3_periodic_fetch_stats call there, and
10001 * tg3_get_stats to see how this works for 5705/5750 chips.
10003 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10004 ((u64) tp->stats_mapping >> 32));
10005 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10006 ((u64) tp->stats_mapping & 0xffffffff));
10007 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10009 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10011 /* Clear statistics and status block memory areas */
10012 for (i = NIC_SRAM_STATS_BLK;
10013 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10014 i += sizeof(u32)) {
10015 tg3_write_mem(tp, i, 0);
10020 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10022 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10023 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10024 if (!tg3_flag(tp, 5705_PLUS))
10025 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10027 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10028 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10029 /* reset to prevent losing 1st rx packet intermittently */
10030 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10034 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10035 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10036 MAC_MODE_FHDE_ENABLE;
10037 if (tg3_flag(tp, ENABLE_APE))
10038 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10039 if (!tg3_flag(tp, 5705_PLUS) &&
10040 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10041 tg3_asic_rev(tp) != ASIC_REV_5700)
10042 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10043 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10046 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10047 * If TG3_FLAG_IS_NIC is zero, we should read the
10048 * register to preserve the GPIO settings for LOMs. The GPIOs,
10049 * whether used as inputs or outputs, are set by boot code after
* reset.
*/
10052 if (!tg3_flag(tp, IS_NIC)) {
u32 gpio_mask;
10055 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10056 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10057 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10059 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10060 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10061 GRC_LCLCTRL_GPIO_OUTPUT3;
10063 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10064 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10066 tp->grc_local_ctrl &= ~gpio_mask;
10067 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10069 /* GPIO1 must be driven high for eeprom write protect */
10070 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10071 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10072 GRC_LCLCTRL_GPIO_OUTPUT1);
10074 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10077 if (tg3_flag(tp, USING_MSIX)) {
10078 val = tr32(MSGINT_MODE);
10079 val |= MSGINT_MODE_ENABLE;
10080 if (tp->irq_cnt > 1)
10081 val |= MSGINT_MODE_MULTIVEC_EN;
10082 if (!tg3_flag(tp, 1SHOT_MSI))
10083 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10084 tw32(MSGINT_MODE, val);
10087 if (!tg3_flag(tp, 5705_PLUS)) {
10088 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10092 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10093 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10094 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10095 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10096 WDMAC_MODE_LNGREAD_ENAB);
10098 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10099 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10100 if (tg3_flag(tp, TSO_CAPABLE) &&
10101 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10102 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
/* nothing */
10104 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10105 !tg3_flag(tp, IS_5788)) {
10106 val |= WDMAC_MODE_RX_ACCEL;
10110 /* Enable host coalescing bug fix */
10111 if (tg3_flag(tp, 5755_PLUS))
10112 val |= WDMAC_MODE_STATUS_TAG_FIX;
10114 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10115 val |= WDMAC_MODE_BURST_ALL_DATA;
10117 tw32_f(WDMAC_MODE, val);
10120 if (tg3_flag(tp, PCIX_MODE)) {
u16 pcix_cmd;

10123 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
10125 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10126 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10127 pcix_cmd |= PCI_X_CMD_READ_2K;
10128 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10129 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10130 pcix_cmd |= PCI_X_CMD_READ_2K;
10132 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
pcix_cmd);
10136 tw32_f(RDMAC_MODE, rdmac_mode);
10139 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10140 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10141 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
}
10144 if (i < TG3_NUM_RDMA_CHANNELS) {
10145 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10146 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10147 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10148 tg3_flag_set(tp, 5719_RDMA_BUG);
10152 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10153 if (!tg3_flag(tp, 5705_PLUS))
10154 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10156 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10157 tw32(SNDDATAC_MODE,
10158 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10160 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10162 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10163 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10164 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10165 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10166 val |= RCVDBDI_MODE_LRG_RING_SZ;
10167 tw32(RCVDBDI_MODE, val);
10168 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10169 if (tg3_flag(tp, HW_TSO_1) ||
10170 tg3_flag(tp, HW_TSO_2) ||
10171 tg3_flag(tp, HW_TSO_3))
10172 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10173 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10174 if (tg3_flag(tp, ENABLE_TSS))
10175 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10176 tw32(SNDBDI_MODE, val);
10177 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10179 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10180 err = tg3_load_5701_a0_firmware_fix(tp);
10185 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10186 /* Ignore any errors for the firmware download. If download
10187 * fails, the device will operate with EEE disabled
10189 tg3_load_57766_firmware(tp);
10192 if (tg3_flag(tp, TSO_CAPABLE)) {
10193 err = tg3_load_tso_firmware(tp);
10198 tp->tx_mode = TX_MODE_ENABLE;
10200 if (tg3_flag(tp, 5755_PLUS) ||
10201 tg3_asic_rev(tp) == ASIC_REV_5906)
10202 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10204 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10205 tg3_asic_rev(tp) == ASIC_REV_5762) {
10206 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10207 tp->tx_mode &= ~val;
10208 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10211 tw32_f(MAC_TX_MODE, tp->tx_mode);
10214 if (tg3_flag(tp, ENABLE_RSS)) {
10215 tg3_rss_write_indir_tbl(tp);
10217 /* Setup the "secret" hash key. */
10218 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10219 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10220 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10221 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10222 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10223 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10224 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10225 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10226 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10227 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10230 tp->rx_mode = RX_MODE_ENABLE;
10231 if (tg3_flag(tp, 5755_PLUS))
10232 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10234 if (tg3_flag(tp, ENABLE_RSS))
10235 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10236 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10237 RX_MODE_RSS_IPV6_HASH_EN |
10238 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10239 RX_MODE_RSS_IPV4_HASH_EN |
10240 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10242 tw32_f(MAC_RX_MODE, tp->rx_mode);
10245 tw32(MAC_LED_CTRL, tp->led_ctrl);
10247 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10248 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10249 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10252 tw32_f(MAC_RX_MODE, tp->rx_mode);
10255 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10256 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10257 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10258 /* Set drive transmission level to 1.2V */
10259 /* only if the signal pre-emphasis bit is not set */
10260 val = tr32(MAC_SERDES_CFG);
10263 tw32(MAC_SERDES_CFG, val);
10265 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10266 tw32(MAC_SERDES_CFG, 0x616000);
10269 /* Prevent chip from dropping frames when flow control
* is enabled.
*/
10272 if (tg3_flag(tp, 57765_CLASS))
val = 1;
else
val = 2;
10276 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10278 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10279 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10280 /* Use hardware link auto-negotiation */
10281 tg3_flag_set(tp, HW_AUTONEG);
10284 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10285 tg3_asic_rev(tp) == ASIC_REV_5714) {
10288 tmp = tr32(SERDES_RX_CTRL);
10289 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10290 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10291 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10292 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10295 if (!tg3_flag(tp, USE_PHYLIB)) {
10296 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10297 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10299 err = tg3_setup_phy(tp, false);
10303 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10304 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10307 /* Clear CRC stats. */
10308 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10309 tg3_writephy(tp, MII_TG3_TEST1,
10310 tmp | MII_TG3_TEST1_CRC_EN);
10311 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10316 __tg3_set_rx_mode(tp->dev);
10318 /* Initialize receive rules. */
10319 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10320 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10321 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10322 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10324 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
limit = 8;
else
limit = 16;
10328 if (tg3_flag(tp, ENABLE_ASF))
limit -= 4;
switch (limit) {
case 16:
10332 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
case 15:
10334 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
case 14:
10336 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
case 13:
10338 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
case 12:
10340 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
case 11:
10342 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
case 10:
10344 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
case 9:
10346 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
case 8:
10348 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
case 7:
10350 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
case 6:
10352 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
case 5:
10354 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
case 4:
10356 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
case 3:
10358 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
case 2:
case 1:
default:
break;
}
10366 if (tg3_flag(tp, ENABLE_APE))
10367 /* Write our heartbeat update interval to APE. */
10368 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10369 APE_HOST_HEARTBEAT_INT_DISABLE);
10371 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10376 /* Called at device open time to get the chip ready for
10377 * packet processing. Invoked with tp->lock held.
10379 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10381 tg3_switch_clocks(tp);
10383 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10385 return tg3_reset_hw(tp, reset_phy);
10388 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10392 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10393 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10395 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10398 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10399 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10400 memset(ocir, 0, TG3_OCIR_LEN);
10404 /* sysfs attributes for hwmon */
10405 static ssize_t tg3_show_temp(struct device *dev,
10406 struct device_attribute *devattr, char *buf)
10408 struct pci_dev *pdev = to_pci_dev(dev);
10409 struct net_device *netdev = pci_get_drvdata(pdev);
10410 struct tg3 *tp = netdev_priv(netdev);
10411 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
u32 temperature;

10414 spin_lock_bh(&tp->lock);
10415 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10416 sizeof(temperature));
10417 spin_unlock_bh(&tp->lock);
10418 return sprintf(buf, "%u\n", temperature);
10422 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10423 TG3_TEMP_SENSOR_OFFSET);
10424 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10425 TG3_TEMP_CAUTION_OFFSET);
10426 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10427 TG3_TEMP_MAX_OFFSET);
10429 static struct attribute *tg3_attributes[] = {
10430 &sensor_dev_attr_temp1_input.dev_attr.attr,
10431 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10432 &sensor_dev_attr_temp1_max.dev_attr.attr,
NULL
10436 static const struct attribute_group tg3_group = {
10437 .attrs = tg3_attributes,
10440 static void tg3_hwmon_close(struct tg3 *tp)
10442 if (tp->hwmon_dev) {
10443 hwmon_device_unregister(tp->hwmon_dev);
10444 tp->hwmon_dev = NULL;
10445 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10449 static void tg3_hwmon_open(struct tg3 *tp)
10453 struct pci_dev *pdev = tp->pdev;
10454 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10456 tg3_sd_scan_scratchpad(tp, ocirs);
10458 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10459 if (!ocirs[i].src_data_length)
continue;
10462 size += ocirs[i].src_hdr_length;
10463 size += ocirs[i].src_data_length;
10469 /* Register hwmon sysfs hooks */
10470 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10472 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10476 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10477 if (IS_ERR(tp->hwmon_dev)) {
10478 tp->hwmon_dev = NULL;
10479 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10480 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10485 #define TG3_STAT_ADD32(PSTAT, REG) \
10486 do { u32 __val = tr32(REG); \
10487 (PSTAT)->low += __val; \
10488 if ((PSTAT)->low < __val) \
10489 (PSTAT)->high += 1; \
} while (0)
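/* Illustrative sketch (not part of the driver): the same carry technique as
 * TG3_STAT_ADD32 above, written out as a function. The hardware exposes
 * 32-bit counters that are read periodically; when the accumulated low word
 * wraps past 32 bits the unsigned sum becomes smaller than the value just
 * added, and that wrap is folded into the high word of the 64-bit software
 * counter (tg3_stat64_t, see get_stat64() further below).
 */
static inline void tg3_stat64_add_example(tg3_stat64_t *pstat, u32 delta)
{
	pstat->low += delta;
	if (pstat->low < delta)		/* 32-bit wraparound detected */
		pstat->high += 1;
}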
10492 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10494 struct tg3_hw_stats *sp = tp->hw_stats;
10499 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10500 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10501 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10502 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10503 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10504 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10505 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10506 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10507 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10508 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10509 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10510 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10511 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10512 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10513 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10514 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10517 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10518 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10519 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10520 tg3_flag_clear(tp, 5719_RDMA_BUG);
10523 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10524 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10525 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10526 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10527 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10528 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10529 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10530 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10531 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10532 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10533 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10534 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10535 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10536 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10538 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10539 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10540 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10541 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10542 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
10544 u32 val = tr32(HOSTCC_FLOW_ATTN);
10545 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
if (val) {
10547 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10548 sp->rx_discards.low += val;
10549 if (sp->rx_discards.low < val)
10550 sp->rx_discards.high += 1;
10552 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10554 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10557 static void tg3_chk_missed_msi(struct tg3 *tp)
10561 for (i = 0; i < tp->irq_cnt; i++) {
10562 struct tg3_napi *tnapi = &tp->napi[i];
10564 if (tg3_has_work(tnapi)) {
10565 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10566 tnapi->last_tx_cons == tnapi->tx_cons) {
10567 if (tnapi->chk_msi_cnt < 1) {
10568 tnapi->chk_msi_cnt++;
10574 tnapi->chk_msi_cnt = 0;
10575 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10576 tnapi->last_tx_cons = tnapi->tx_cons;
10580 static void tg3_timer(unsigned long __opaque)
10582 struct tg3 *tp = (struct tg3 *) __opaque;
10584 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10585 goto restart_timer;
10587 spin_lock(&tp->lock);
10589 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10590 tg3_flag(tp, 57765_CLASS))
10591 tg3_chk_missed_msi(tp);
10593 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10594 /* BCM4785: Flush posted writes from GbE to host memory. */
tr32(HOSTCC_MODE);
10598 if (!tg3_flag(tp, TAGGED_STATUS)) {
10599 /* All of this garbage is because when using non-tagged
10600 * IRQ status the mailbox/status_block protocol the chip
10601 * uses with the cpu is race prone.
10603 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10604 tw32(GRC_LOCAL_CTRL,
10605 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
} else {
10607 tw32(HOSTCC_MODE, tp->coalesce_mode |
10608 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10611 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10612 spin_unlock(&tp->lock);
10613 tg3_reset_task_schedule(tp);
10614 goto restart_timer;
10618 /* This part only runs once per second. */
10619 if (!--tp->timer_counter) {
10620 if (tg3_flag(tp, 5705_PLUS))
10621 tg3_periodic_fetch_stats(tp);
10623 if (tp->setlpicnt && !--tp->setlpicnt)
10624 tg3_phy_eee_enable(tp);
10626 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10630 mac_stat = tr32(MAC_STATUS);
10633 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10634 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10636 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10640 tg3_setup_phy(tp, false);
10641 } else if (tg3_flag(tp, POLL_SERDES)) {
10642 u32 mac_stat = tr32(MAC_STATUS);
10643 int need_setup = 0;
10646 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10649 if (!tp->link_up &&
10650 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10651 MAC_STATUS_SIGNAL_DET))) {
10655 if (!tp->serdes_counter) {
10658 ~MAC_MODE_PORT_MODE_MASK));
10660 tw32_f(MAC_MODE, tp->mac_mode);
10663 tg3_setup_phy(tp, false);
10665 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10666 tg3_flag(tp, 5780_CLASS)) {
10667 tg3_serdes_parallel_detect(tp);
10670 tp->timer_counter = tp->timer_multiplier;
10673 /* Heartbeat is only sent once every 2 seconds.
10675 * The heartbeat is to tell the ASF firmware that the host
10676 * driver is still alive. In the event that the OS crashes,
10677 * ASF needs to reset the hardware to free up the FIFO space
10678 * that may be filled with rx packets destined for the host.
10679 * If the FIFO is full, ASF will no longer function properly.
10681 * Unintended resets have been reported on real time kernels
10682 * where the timer doesn't run on time. Netpoll will also have
* the same problem.
*
10685 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10686 * to check the ring condition when the heartbeat is expiring
10687 * before doing the reset. This will prevent most unintended
* resets.
*/
10690 if (!--tp->asf_counter) {
10691 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10692 tg3_wait_for_event_ack(tp);
10694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10695 FWCMD_NICDRV_ALIVE3);
10696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10697 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10698 TG3_FW_UPDATE_TIMEOUT_SEC);
10700 tg3_generate_fw_event(tp);
10702 tp->asf_counter = tp->asf_multiplier;
10705 spin_unlock(&tp->lock);
restart_timer:
10708 tp->timer.expires = jiffies + tp->timer_offset;
10709 add_timer(&tp->timer);
10712 static void tg3_timer_init(struct tg3 *tp)
10714 if (tg3_flag(tp, TAGGED_STATUS) &&
10715 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10716 !tg3_flag(tp, 57765_CLASS))
10717 tp->timer_offset = HZ;
else
10719 tp->timer_offset = HZ / 10;
10721 BUG_ON(tp->timer_offset > HZ);
10723 tp->timer_multiplier = (HZ / tp->timer_offset);
10724 tp->asf_multiplier = (HZ / tp->timer_offset) *
10725 TG3_FW_UPDATE_FREQ_SEC;
10727 init_timer(&tp->timer);
10728 tp->timer.data = (unsigned long) tp;
10729 tp->timer.function = tg3_timer;
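/* Worked example: with non-tagged status and HZ == 1000, timer_offset is
 * HZ / 10 (a 100 ms tick), timer_multiplier is 10 (so the once-per-second
 * work in tg3_timer runs every 10th tick) and asf_multiplier is
 * 10 * TG3_FW_UPDATE_FREQ_SEC; if that constant is 2, the ASF heartbeat
 * fires every 20 ticks, matching the 2-second interval described in
 * tg3_timer above.
 */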
10732 static void tg3_timer_start(struct tg3 *tp)
10734 tp->asf_counter = tp->asf_multiplier;
10735 tp->timer_counter = tp->timer_multiplier;
10737 tp->timer.expires = jiffies + tp->timer_offset;
10738 add_timer(&tp->timer);
10741 static void tg3_timer_stop(struct tg3 *tp)
10743 del_timer_sync(&tp->timer);
10746 /* Restart hardware after configuration changes, self-test, etc.
10747 * Invoked with tp->lock held.
10749 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10750 __releases(tp->lock)
10751 __acquires(tp->lock)
10755 err = tg3_init_hw(tp, reset_phy);
if (err) {
10757 netdev_err(tp->dev,
10758 "Failed to re-initialize device, aborting\n");
10759 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10760 tg3_full_unlock(tp);
10761 tg3_timer_stop(tp);
10763 tg3_napi_enable(tp);
10764 dev_close(tp->dev);
10765 tg3_full_lock(tp, 0);
10770 static void tg3_reset_task(struct work_struct *work)
10772 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10775 tg3_full_lock(tp, 0);
10777 if (!netif_running(tp->dev)) {
10778 tg3_flag_clear(tp, RESET_TASK_PENDING);
10779 tg3_full_unlock(tp);
10783 tg3_full_unlock(tp);
10787 tg3_netif_stop(tp);
10789 tg3_full_lock(tp, 1);
10791 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10792 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10793 tp->write32_rx_mbox = tg3_write_flush_reg32;
10794 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10795 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10798 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10799 err = tg3_init_hw(tp, true);
10803 tg3_netif_start(tp);
10806 tg3_full_unlock(tp);
10811 tg3_flag_clear(tp, RESET_TASK_PENDING);
10814 static int tg3_request_irq(struct tg3 *tp, int irq_num)
irq_handler_t fn;
10817 unsigned long flags;
char *name;
10819 struct tg3_napi *tnapi = &tp->napi[irq_num];
10821 if (tp->irq_cnt == 1)
10822 name = tp->dev->name;
else {
10824 name = &tnapi->irq_lbl[0];
10825 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10826 name[IFNAMSIZ-1] = 0;
10829 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fn = tg3_msi;
10831 if (tg3_flag(tp, 1SHOT_MSI))
10832 fn = tg3_msi_1shot;
flags = 0;
} else {
10835 fn = tg3_interrupt;
10836 if (tg3_flag(tp, TAGGED_STATUS))
10837 fn = tg3_interrupt_tagged;
10838 flags = IRQF_SHARED;
10841 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10844 static int tg3_test_interrupt(struct tg3 *tp)
10846 struct tg3_napi *tnapi = &tp->napi[0];
10847 struct net_device *dev = tp->dev;
10848 int err, i, intr_ok = 0;
10851 if (!netif_running(dev))
10854 tg3_disable_ints(tp);
10856 free_irq(tnapi->irq_vec, tnapi);
10859 * Turn off MSI one shot mode. Otherwise this test has no
10860 * observable way to know whether the interrupt was delivered.
10862 if (tg3_flag(tp, 57765_PLUS)) {
10863 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10864 tw32(MSGINT_MODE, val);
10867 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10868 IRQF_SHARED, dev->name, tnapi);
10872 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10873 tg3_enable_ints(tp);
10875 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10878 for (i = 0; i < 5; i++) {
10879 u32 int_mbox, misc_host_ctrl;
10881 int_mbox = tr32_mailbox(tnapi->int_mbox);
10882 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10884 if ((int_mbox != 0) ||
10885 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
intr_ok = 1;
break;
}
10890 if (tg3_flag(tp, 57765_PLUS) &&
10891 tnapi->hw_status->status_tag != tnapi->last_tag)
10892 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10897 tg3_disable_ints(tp);
10899 free_irq(tnapi->irq_vec, tnapi);
10901 err = tg3_request_irq(tp, 0);
10907 /* Reenable MSI one shot mode. */
10908 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10909 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10910 tw32(MSGINT_MODE, val);
10918 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10919 * successfully restored
10921 static int tg3_test_msi(struct tg3 *tp)
10926 if (!tg3_flag(tp, USING_MSI))
10929 /* Turn off SERR reporting in case MSI terminates with Master
* Abort.
*/
10932 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10933 pci_write_config_word(tp->pdev, PCI_COMMAND,
10934 pci_cmd & ~PCI_COMMAND_SERR);
10936 err = tg3_test_interrupt(tp);
10938 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
if (!err)
return 0;

10943 /* other failures */
if (err != -EIO)
return err;
10947 /* MSI test failed, go back to INTx mode */
10948 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10949 "to INTx mode. Please report this failure to the PCI "
10950 "maintainer and include system chipset information\n");
10952 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10954 pci_disable_msi(tp->pdev);
10956 tg3_flag_clear(tp, USING_MSI);
10957 tp->napi[0].irq_vec = tp->pdev->irq;
10959 err = tg3_request_irq(tp, 0);
10963 /* Need to reset the chip because the MSI cycle may have terminated
10964 * with Master Abort.
10966 tg3_full_lock(tp, 1);
10968 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10969 err = tg3_init_hw(tp, true);
10971 tg3_full_unlock(tp);
10974 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10979 static int tg3_request_firmware(struct tg3 *tp)
10981 const struct tg3_firmware_hdr *fw_hdr;
10983 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10984 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10989 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10991 /* Firmware blob starts with version numbers, followed by
10992 * start address and _full_ length including BSS sections
10993 * (which must be longer than the actual data, of course).
*/
10996 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10997 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10998 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10999 tp->fw_len, tp->fw_needed);
11000 release_firmware(tp->fw);
11005 /* We no longer need firmware; we have it. */
11006 tp->fw_needed = NULL;
11010 static u32 tg3_irq_count(struct tg3 *tp)
11012 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
if (irq_cnt > 1) {
11015 /* We want as many rx rings enabled as there are cpus.
11016 * In multiqueue MSI-X mode, the first MSI-X vector
11017 * only deals with link interrupts, etc, so we add
11018 * one to the number of vectors we are requesting.
*/
11020 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
}

return irq_cnt;
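/* Example: with 4 online CPUs and no user overrides, rxq_cnt is typically 4
 * (netif_get_num_default_rss_queues(), capped by rxq_max) and txq_cnt is 1,
 * so irq_cnt starts at 4; since that is greater than 1, one extra vector is
 * requested for the link/error interrupt, giving 5, subject to tp->irq_max.
 */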
11026 static bool tg3_enable_msix(struct tg3 *tp)
11029 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11031 tp->txq_cnt = tp->txq_req;
11032 tp->rxq_cnt = tp->rxq_req;
if (!tp->rxq_cnt)
11034 tp->rxq_cnt = netif_get_num_default_rss_queues();
11035 if (tp->rxq_cnt > tp->rxq_max)
11036 tp->rxq_cnt = tp->rxq_max;
11038 /* Disable multiple TX rings by default. Simple round-robin hardware
11039 * scheduling of the TX rings can cause starvation of rings with
11040 * small packets when other rings have TSO or jumbo packets.
*/
if (!tp->txq_req)
tp->txq_cnt = 1;
11045 tp->irq_cnt = tg3_irq_count(tp);
11047 for (i = 0; i < tp->irq_max; i++) {
11048 msix_ent[i].entry = i;
11049 msix_ent[i].vector = 0;
11052 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11055 } else if (rc != 0) {
11056 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11058 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11061 tp->rxq_cnt = max(rc - 1, 1);
11063 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11066 for (i = 0; i < tp->irq_max; i++)
11067 tp->napi[i].irq_vec = msix_ent[i].vector;
11069 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11070 pci_disable_msix(tp->pdev);
11074 if (tp->irq_cnt == 1)
return true;

11077 tg3_flag_set(tp, ENABLE_RSS);
11079 if (tp->txq_cnt > 1)
11080 tg3_flag_set(tp, ENABLE_TSS);
11082 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11087 static void tg3_ints_init(struct tg3 *tp)
11089 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11090 !tg3_flag(tp, TAGGED_STATUS)) {
11091 /* All MSI supporting chips should support tagged
11092 * status. Assert that this is the case.
11094 netdev_warn(tp->dev,
11095 "MSI without TAGGED_STATUS? Not using MSI\n");
11099 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11100 tg3_flag_set(tp, USING_MSIX);
11101 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11102 tg3_flag_set(tp, USING_MSI);
11104 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11105 u32 msi_mode = tr32(MSGINT_MODE);
11106 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11107 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11108 if (!tg3_flag(tp, 1SHOT_MSI))
11109 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11110 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
defcfg:
11113 if (!tg3_flag(tp, USING_MSIX)) {
tp->irq_cnt = 1;
11115 tp->napi[0].irq_vec = tp->pdev->irq;
11118 if (tp->irq_cnt == 1) {
11121 netif_set_real_num_tx_queues(tp->dev, 1);
11122 netif_set_real_num_rx_queues(tp->dev, 1);
11126 static void tg3_ints_fini(struct tg3 *tp)
11128 if (tg3_flag(tp, USING_MSIX))
11129 pci_disable_msix(tp->pdev);
11130 else if (tg3_flag(tp, USING_MSI))
11131 pci_disable_msi(tp->pdev);
11132 tg3_flag_clear(tp, USING_MSI);
11133 tg3_flag_clear(tp, USING_MSIX);
11134 tg3_flag_clear(tp, ENABLE_RSS);
11135 tg3_flag_clear(tp, ENABLE_TSS);
11138 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
bool init)
11141 struct net_device *dev = tp->dev;
11145 * Setup interrupts first so we know how
11146 * many NAPI resources to allocate
11150 tg3_rss_check_indir_tbl(tp);
11152 /* The placement of this call is tied
11153 * to the setup and use of Host TX descriptors.
11155 err = tg3_alloc_consistent(tp);
11161 tg3_napi_enable(tp);
11163 for (i = 0; i < tp->irq_cnt; i++) {
11164 struct tg3_napi *tnapi = &tp->napi[i];
11165 err = tg3_request_irq(tp, i);
if (err) {
11167 for (i--; i >= 0; i--) {
11168 tnapi = &tp->napi[i];
11169 free_irq(tnapi->irq_vec, tnapi);
11175 tg3_full_lock(tp, 0);
11177 err = tg3_init_hw(tp, reset_phy);
if (err) {
11179 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11180 tg3_free_rings(tp);
11183 tg3_full_unlock(tp);
11188 if (test_irq && tg3_flag(tp, USING_MSI)) {
11189 err = tg3_test_msi(tp);
if (err) {
11192 tg3_full_lock(tp, 0);
11193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11194 tg3_free_rings(tp);
11195 tg3_full_unlock(tp);
11200 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11201 u32 val = tr32(PCIE_TRANSACTION_CFG);
11203 tw32(PCIE_TRANSACTION_CFG,
11204 val | PCIE_TRANS_CFG_1SHOT_MSI);
11210 tg3_hwmon_open(tp);
11212 tg3_full_lock(tp, 0);
11214 tg3_timer_start(tp);
11215 tg3_flag_set(tp, INIT_COMPLETE);
11216 tg3_enable_ints(tp);
if (init)
tg3_ptp_init(tp);
else
11221 tg3_ptp_resume(tp);
11224 tg3_full_unlock(tp);
11226 netif_tx_start_all_queues(dev);
11229 * Reset the loopback feature if it was turned on while the device was
* down, to make sure that it is installed properly now.
*/
11232 if (dev->features & NETIF_F_LOOPBACK)
11233 tg3_set_loopback(dev, dev->features);
11238 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11239 struct tg3_napi *tnapi = &tp->napi[i];
11240 free_irq(tnapi->irq_vec, tnapi);
11244 tg3_napi_disable(tp);
11246 tg3_free_consistent(tp);
11254 static void tg3_stop(struct tg3 *tp)
11258 tg3_reset_task_cancel(tp);
11259 tg3_netif_stop(tp);
11261 tg3_timer_stop(tp);
11263 tg3_hwmon_close(tp);
11267 tg3_full_lock(tp, 1);
11269 tg3_disable_ints(tp);
11271 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11272 tg3_free_rings(tp);
11273 tg3_flag_clear(tp, INIT_COMPLETE);
11275 tg3_full_unlock(tp);
11277 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11278 struct tg3_napi *tnapi = &tp->napi[i];
11279 free_irq(tnapi->irq_vec, tnapi);
11286 tg3_free_consistent(tp);
11289 static int tg3_open(struct net_device *dev)
11291 struct tg3 *tp = netdev_priv(dev);
11294 if (tp->fw_needed) {
11295 err = tg3_request_firmware(tp);
11296 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (err) {
11298 netdev_warn(tp->dev, "EEE capability disabled\n");
11299 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11300 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11301 netdev_warn(tp->dev, "EEE capability restored\n");
11302 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11304 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
11308 netdev_warn(tp->dev, "TSO capability disabled\n");
11309 tg3_flag_clear(tp, TSO_CAPABLE);
11310 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11311 netdev_notice(tp->dev, "TSO capability restored\n");
11312 tg3_flag_set(tp, TSO_CAPABLE);
11316 tg3_carrier_off(tp);
11318 err = tg3_power_up(tp);
11322 tg3_full_lock(tp, 0);
11324 tg3_disable_ints(tp);
11325 tg3_flag_clear(tp, INIT_COMPLETE);
11327 tg3_full_unlock(tp);
11329 err = tg3_start(tp,
11330 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
true, true);
if (err) {
11333 tg3_frob_aux_power(tp, false);
11334 pci_set_power_state(tp->pdev, PCI_D3hot);
11337 if (tg3_flag(tp, PTP_CAPABLE)) {
11338 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
&tp->pdev->dev);
11340 if (IS_ERR(tp->ptp_clock))
11341 tp->ptp_clock = NULL;
11347 static int tg3_close(struct net_device *dev)
11349 struct tg3 *tp = netdev_priv(dev);

tg3_ptp_fini(tp);
tg3_stop(tp);

11355 /* Clear stats across close / open calls */
11356 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11357 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11359 tg3_power_down(tp);
11361 tg3_carrier_off(tp);
11366 static inline u64 get_stat64(tg3_stat64_t *val)
11368 return ((u64)val->high << 32) | ((u64)val->low);
11371 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11373 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11375 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11376 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11377 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11380 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11381 tg3_writephy(tp, MII_TG3_TEST1,
11382 val | MII_TG3_TEST1_CRC_EN);
11383 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11387 tp->phy_crc_errors += val;
11389 return tp->phy_crc_errors;
11392 return get_stat64(&hw_stats->rx_fcs_errors);
11395 #define ESTAT_ADD(member) \
11396 estats->member = old_estats->member + \
11397 get_stat64(&hw_stats->member)
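/* Each ESTAT_ADD() below adds the live counter from the hardware statistics
 * block to the totals saved in tp->estats_prev (cleared across close/open in
 * tg3_close above), so the values reported through ethtool keep accumulating
 * even though the hardware counters themselves presumably restart from zero
 * after a chip reset.
 */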
11399 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11401 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11402 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11404 ESTAT_ADD(rx_octets);
11405 ESTAT_ADD(rx_fragments);
11406 ESTAT_ADD(rx_ucast_packets);
11407 ESTAT_ADD(rx_mcast_packets);
11408 ESTAT_ADD(rx_bcast_packets);
11409 ESTAT_ADD(rx_fcs_errors);
11410 ESTAT_ADD(rx_align_errors);
11411 ESTAT_ADD(rx_xon_pause_rcvd);
11412 ESTAT_ADD(rx_xoff_pause_rcvd);
11413 ESTAT_ADD(rx_mac_ctrl_rcvd);
11414 ESTAT_ADD(rx_xoff_entered);
11415 ESTAT_ADD(rx_frame_too_long_errors);
11416 ESTAT_ADD(rx_jabbers);
11417 ESTAT_ADD(rx_undersize_packets);
11418 ESTAT_ADD(rx_in_length_errors);
11419 ESTAT_ADD(rx_out_length_errors);
11420 ESTAT_ADD(rx_64_or_less_octet_packets);
11421 ESTAT_ADD(rx_65_to_127_octet_packets);
11422 ESTAT_ADD(rx_128_to_255_octet_packets);
11423 ESTAT_ADD(rx_256_to_511_octet_packets);
11424 ESTAT_ADD(rx_512_to_1023_octet_packets);
11425 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11426 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11427 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11428 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11429 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11431 ESTAT_ADD(tx_octets);
11432 ESTAT_ADD(tx_collisions);
11433 ESTAT_ADD(tx_xon_sent);
11434 ESTAT_ADD(tx_xoff_sent);
11435 ESTAT_ADD(tx_flow_control);
11436 ESTAT_ADD(tx_mac_errors);
11437 ESTAT_ADD(tx_single_collisions);
11438 ESTAT_ADD(tx_mult_collisions);
11439 ESTAT_ADD(tx_deferred);
11440 ESTAT_ADD(tx_excessive_collisions);
11441 ESTAT_ADD(tx_late_collisions);
11442 ESTAT_ADD(tx_collide_2times);
11443 ESTAT_ADD(tx_collide_3times);
11444 ESTAT_ADD(tx_collide_4times);
11445 ESTAT_ADD(tx_collide_5times);
11446 ESTAT_ADD(tx_collide_6times);
11447 ESTAT_ADD(tx_collide_7times);
11448 ESTAT_ADD(tx_collide_8times);
11449 ESTAT_ADD(tx_collide_9times);
11450 ESTAT_ADD(tx_collide_10times);
11451 ESTAT_ADD(tx_collide_11times);
11452 ESTAT_ADD(tx_collide_12times);
11453 ESTAT_ADD(tx_collide_13times);
11454 ESTAT_ADD(tx_collide_14times);
11455 ESTAT_ADD(tx_collide_15times);
11456 ESTAT_ADD(tx_ucast_packets);
11457 ESTAT_ADD(tx_mcast_packets);
11458 ESTAT_ADD(tx_bcast_packets);
11459 ESTAT_ADD(tx_carrier_sense_errors);
11460 ESTAT_ADD(tx_discards);
11461 ESTAT_ADD(tx_errors);
11463 ESTAT_ADD(dma_writeq_full);
11464 ESTAT_ADD(dma_write_prioq_full);
11465 ESTAT_ADD(rxbds_empty);
11466 ESTAT_ADD(rx_discards);
11467 ESTAT_ADD(rx_errors);
11468 ESTAT_ADD(rx_threshold_hit);
11470 ESTAT_ADD(dma_readq_full);
11471 ESTAT_ADD(dma_read_prioq_full);
11472 ESTAT_ADD(tx_comp_queue_full);
11474 ESTAT_ADD(ring_set_send_prod_index);
11475 ESTAT_ADD(ring_status_update);
11476 ESTAT_ADD(nic_irqs);
11477 ESTAT_ADD(nic_avoided_irqs);
11478 ESTAT_ADD(nic_tx_threshold_hit);
11480 ESTAT_ADD(mbuf_lwm_thresh_hit);
11483 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11485 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11486 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11488 stats->rx_packets = old_stats->rx_packets +
11489 get_stat64(&hw_stats->rx_ucast_packets) +
11490 get_stat64(&hw_stats->rx_mcast_packets) +
11491 get_stat64(&hw_stats->rx_bcast_packets);
11493 stats->tx_packets = old_stats->tx_packets +
11494 get_stat64(&hw_stats->tx_ucast_packets) +
11495 get_stat64(&hw_stats->tx_mcast_packets) +
11496 get_stat64(&hw_stats->tx_bcast_packets);
11498 stats->rx_bytes = old_stats->rx_bytes +
11499 get_stat64(&hw_stats->rx_octets);
11500 stats->tx_bytes = old_stats->tx_bytes +
11501 get_stat64(&hw_stats->tx_octets);
11503 stats->rx_errors = old_stats->rx_errors +
11504 get_stat64(&hw_stats->rx_errors);
11505 stats->tx_errors = old_stats->tx_errors +
11506 get_stat64(&hw_stats->tx_errors) +
11507 get_stat64(&hw_stats->tx_mac_errors) +
11508 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11509 get_stat64(&hw_stats->tx_discards);
11511 stats->multicast = old_stats->multicast +
11512 get_stat64(&hw_stats->rx_mcast_packets);
11513 stats->collisions = old_stats->collisions +
11514 get_stat64(&hw_stats->tx_collisions);
11516 stats->rx_length_errors = old_stats->rx_length_errors +
11517 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11518 get_stat64(&hw_stats->rx_undersize_packets);
11520 stats->rx_over_errors = old_stats->rx_over_errors +
11521 get_stat64(&hw_stats->rxbds_empty);
11522 stats->rx_frame_errors = old_stats->rx_frame_errors +
11523 get_stat64(&hw_stats->rx_align_errors);
11524 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11525 get_stat64(&hw_stats->tx_discards);
11526 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11527 get_stat64(&hw_stats->tx_carrier_sense_errors);
11529 stats->rx_crc_errors = old_stats->rx_crc_errors +
11530 tg3_calc_crc_errors(tp);
11532 stats->rx_missed_errors = old_stats->rx_missed_errors +
11533 get_stat64(&hw_stats->rx_discards);
11535 stats->rx_dropped = tp->rx_dropped;
11536 stats->tx_dropped = tp->tx_dropped;
11539 static int tg3_get_regs_len(struct net_device *dev)
11541 return TG3_REG_BLK_SIZE;
11544 static void tg3_get_regs(struct net_device *dev,
11545 struct ethtool_regs *regs, void *_p)
11547 struct tg3 *tp = netdev_priv(dev);
11551 memset(_p, 0, TG3_REG_BLK_SIZE);
11553 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11556 tg3_full_lock(tp, 0);
11558 tg3_dump_legacy_regs(tp, (u32 *)_p);
11560 tg3_full_unlock(tp);
11563 static int tg3_get_eeprom_len(struct net_device *dev)
11565 struct tg3 *tp = netdev_priv(dev);
11567 return tp->nvram_size;
11570 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11572 struct tg3 *tp = netdev_priv(dev);
11575 u32 i, offset, len, b_offset, b_count;
11578 if (tg3_flag(tp, NO_NVRAM))
11581 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11584 offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
11588 eeprom->magic = TG3_EEPROM_MAGIC;
11591 /* adjustments to start on required 4 byte boundary */
11592 b_offset = offset & 3;
11593 b_count = 4 - b_offset;
11594 if (b_count > len) {
11595 /* i.e. offset=1 len=2 */
11598 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11601 memcpy(data, ((char *)&val) + b_offset, b_count);
11604 eeprom->len += b_count;
11607 /* read bytes up to the last 4 byte boundary */
11608 pd = &data[eeprom->len];
11609 for (i = 0; i < (len - (len & 3)); i += 4) {
11610 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11615 memcpy(pd + i, &val, 4);
11620 /* read last bytes not ending on 4 byte boundary */
11621 pd = &data[eeprom->len];
11623 b_offset = offset + len - b_count;
11624 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11627 memcpy(pd, &val, b_count);
11628 eeprom->len += b_count;
11633 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11635 struct tg3 *tp = netdev_priv(dev);
11637 u32 offset, len, b_offset, odd_len;
11641 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11644 if (tg3_flag(tp, NO_NVRAM) ||
11645 eeprom->magic != TG3_EEPROM_MAGIC)
11648 offset = eeprom->offset;
11651 if ((b_offset = (offset & 3))) {
11652 /* adjustments to start on required 4 byte boundary */
11653 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
odd_len = 0;
if (len & 3) {
11664 /* adjustments to end on required 4 byte boundary */
odd_len = 1;
11666 len = (len + 3) & ~3;
11667 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11673 if (b_offset || odd_len) {
11674 buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (b_offset)
11678 memcpy(buf, &start, 4);
if (odd_len)
11680 memcpy(buf+len-4, &end, 4);
11681 memcpy(buf + b_offset, data, eeprom->len);
11684 ret = tg3_nvram_write_block(tp, offset, len, buf);
11692 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11694 struct tg3 *tp = netdev_priv(dev);
11696 if (tg3_flag(tp, USE_PHYLIB)) {
11697 struct phy_device *phydev;
11698 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11700 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11701 return phy_ethtool_gset(phydev, cmd);
11704 cmd->supported = (SUPPORTED_Autoneg);
11706 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11707 cmd->supported |= (SUPPORTED_1000baseT_Half |
11708 SUPPORTED_1000baseT_Full);
11710 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11711 cmd->supported |= (SUPPORTED_100baseT_Half |
11712 SUPPORTED_100baseT_Full |
11713 SUPPORTED_10baseT_Half |
11714 SUPPORTED_10baseT_Full |
11716 cmd->port = PORT_TP;
11718 cmd->supported |= SUPPORTED_FIBRE;
11719 cmd->port = PORT_FIBRE;
11722 cmd->advertising = tp->link_config.advertising;
11723 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11724 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11725 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11726 cmd->advertising |= ADVERTISED_Pause;
11728 cmd->advertising |= ADVERTISED_Pause |
11729 ADVERTISED_Asym_Pause;
11731 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11732 cmd->advertising |= ADVERTISED_Asym_Pause;
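/* Flow control is reported using the standard pause advertisement
 * encoding: RX+TX -> Pause, RX only -> Pause | Asym_Pause,
 * TX only -> Asym_Pause.
 */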
11735 if (netif_running(dev) && tp->link_up) {
11736 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11737 cmd->duplex = tp->link_config.active_duplex;
11738 cmd->lp_advertising = tp->link_config.rmt_adv;
11739 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11740 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11741 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11743 cmd->eth_tp_mdix = ETH_TP_MDI;
11746 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11747 cmd->duplex = DUPLEX_UNKNOWN;
11748 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11750 cmd->phy_address = tp->phy_addr;
11751 cmd->transceiver = XCVR_INTERNAL;
11752 cmd->autoneg = tp->link_config.autoneg;
11758 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11760 struct tg3 *tp = netdev_priv(dev);
11761 u32 speed = ethtool_cmd_speed(cmd);
11763 if (tg3_flag(tp, USE_PHYLIB)) {
11764 struct phy_device *phydev;
11765 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11767 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11768 return phy_ethtool_sset(phydev, cmd);
11771 if (cmd->autoneg != AUTONEG_ENABLE &&
11772 cmd->autoneg != AUTONEG_DISABLE)
11775 if (cmd->autoneg == AUTONEG_DISABLE &&
11776 cmd->duplex != DUPLEX_FULL &&
11777 cmd->duplex != DUPLEX_HALF)
11780 if (cmd->autoneg == AUTONEG_ENABLE) {
11781 u32 mask = ADVERTISED_Autoneg |
11783 ADVERTISED_Asym_Pause;
11785 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11786 mask |= ADVERTISED_1000baseT_Half |
11787 ADVERTISED_1000baseT_Full;
11789 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11790 mask |= ADVERTISED_100baseT_Half |
11791 ADVERTISED_100baseT_Full |
11792 ADVERTISED_10baseT_Half |
11793 ADVERTISED_10baseT_Full |
11796 mask |= ADVERTISED_FIBRE;
11798 if (cmd->advertising & ~mask)
11801 mask &= (ADVERTISED_1000baseT_Half |
11802 ADVERTISED_1000baseT_Full |
11803 ADVERTISED_100baseT_Half |
11804 ADVERTISED_100baseT_Full |
11805 ADVERTISED_10baseT_Half |
11806 ADVERTISED_10baseT_Full);
11808 cmd->advertising &= mask;
11810 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11811 if (speed != SPEED_1000)
11814 if (cmd->duplex != DUPLEX_FULL)
11817 if (speed != SPEED_100 &&
11823 tg3_full_lock(tp, 0);
11825 tp->link_config.autoneg = cmd->autoneg;
11826 if (cmd->autoneg == AUTONEG_ENABLE) {
11827 tp->link_config.advertising = (cmd->advertising |
11828 ADVERTISED_Autoneg);
11829 tp->link_config.speed = SPEED_UNKNOWN;
11830 tp->link_config.duplex = DUPLEX_UNKNOWN;
11832 tp->link_config.advertising = 0;
11833 tp->link_config.speed = speed;
11834 tp->link_config.duplex = cmd->duplex;
11837 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11839 tg3_warn_mgmt_link_flap(tp);
11841 if (netif_running(dev))
11842 tg3_setup_phy(tp, true);
11844 tg3_full_unlock(tp);
11849 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11851 struct tg3 *tp = netdev_priv(dev);
11853 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11854 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11855 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11856 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11859 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11861 struct tg3 *tp = netdev_priv(dev);
11863 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11864 wol->supported = WAKE_MAGIC;
11866 wol->supported = 0;
11868 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11869 wol->wolopts = WAKE_MAGIC;
11870 memset(&wol->sopass, 0, sizeof(wol->sopass));
11873 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11875 struct tg3 *tp = netdev_priv(dev);
11876 struct device *dp = &tp->pdev->dev;
11878 if (wol->wolopts & ~WAKE_MAGIC)
11880 if ((wol->wolopts & WAKE_MAGIC) &&
11881 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11884 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11886 spin_lock_bh(&tp->lock);
11887 if (device_may_wakeup(dp))
11888 tg3_flag_set(tp, WOL_ENABLE);
11890 tg3_flag_clear(tp, WOL_ENABLE);
11891 spin_unlock_bh(&tp->lock);
11896 static u32 tg3_get_msglevel(struct net_device *dev)
11898 struct tg3 *tp = netdev_priv(dev);
11899 return tp->msg_enable;
11902 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11904 struct tg3 *tp = netdev_priv(dev);
11905 tp->msg_enable = value;
11908 static int tg3_nway_reset(struct net_device *dev)
11910 struct tg3 *tp = netdev_priv(dev);
11913 if (!netif_running(dev))
11916 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11919 tg3_warn_mgmt_link_flap(tp);
11921 if (tg3_flag(tp, USE_PHYLIB)) {
11922 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11924 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11928 spin_lock_bh(&tp->lock);
11930 tg3_readphy(tp, MII_BMCR, &bmcr);
11931 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11932 ((bmcr & BMCR_ANENABLE) ||
11933 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11934 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11938 spin_unlock_bh(&tp->lock);
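/* Autoneg restart: with phylib the request is forwarded to
 * phy_start_aneg(); otherwise BMCR_ANRESTART is written directly to
 * the PHY under tp->lock, and only when autoneg is already enabled or
 * the link came up through parallel detection.
 */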
11944 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11946 struct tg3 *tp = netdev_priv(dev);
11948 ering->rx_max_pending = tp->rx_std_ring_mask;
11949 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11950 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11952 ering->rx_jumbo_max_pending = 0;
11954 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11956 ering->rx_pending = tp->rx_pending;
11957 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11958 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11960 ering->rx_jumbo_pending = 0;
11962 ering->tx_pending = tp->napi[0].tx_pending;
11965 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11967 struct tg3 *tp = netdev_priv(dev);
11968 int i, irq_sync = 0, err = 0;
11970 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11971 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11972 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11973 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11974 (tg3_flag(tp, TSO_BUG) &&
11975 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
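/* The TX ring must be able to hold more descriptors than a single
 * maximally fragmented skb (MAX_SKB_FRAGS), and roughly three times
 * that on chips with the TSO_BUG workaround, which may re-segment TSO
 * packets inside the driver.
 */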
11978 if (netif_running(dev)) {
11980 tg3_netif_stop(tp);
11984 tg3_full_lock(tp, irq_sync);
11986 tp->rx_pending = ering->rx_pending;
11988 if (tg3_flag(tp, MAX_RXPEND_64) &&
11989 tp->rx_pending > 63)
11990 tp->rx_pending = 63;
11991 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11993 for (i = 0; i < tp->irq_max; i++)
11994 tp->napi[i].tx_pending = ering->tx_pending;
11996 if (netif_running(dev)) {
11997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11998 err = tg3_restart_hw(tp, false);
12000 tg3_netif_start(tp);
12003 tg3_full_unlock(tp);
12005 if (irq_sync && !err)
12011 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12013 struct tg3 *tp = netdev_priv(dev);
12015 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12017 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12018 epause->rx_pause = 1;
12020 epause->rx_pause = 0;
12022 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12023 epause->tx_pause = 1;
12025 epause->tx_pause = 0;
12028 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12030 struct tg3 *tp = netdev_priv(dev);
12033 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12034 tg3_warn_mgmt_link_flap(tp);
12036 if (tg3_flag(tp, USE_PHYLIB)) {
12038 struct phy_device *phydev;
12040 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12042 if (!(phydev->supported & SUPPORTED_Pause) ||
12043 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12044 (epause->rx_pause != epause->tx_pause)))
12047 tp->link_config.flowctrl = 0;
12048 if (epause->rx_pause) {
12049 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12051 if (epause->tx_pause) {
12052 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12053 newadv = ADVERTISED_Pause;
12055 newadv = ADVERTISED_Pause |
12056 ADVERTISED_Asym_Pause;
12057 } else if (epause->tx_pause) {
12058 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12059 newadv = ADVERTISED_Asym_Pause;
12063 if (epause->autoneg)
12064 tg3_flag_set(tp, PAUSE_AUTONEG);
12066 tg3_flag_clear(tp, PAUSE_AUTONEG);
12068 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12069 u32 oldadv = phydev->advertising &
12070 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12071 if (oldadv != newadv) {
12072 phydev->advertising &=
12073 ~(ADVERTISED_Pause |
12074 ADVERTISED_Asym_Pause);
12075 phydev->advertising |= newadv;
12076 if (phydev->autoneg) {
12078 * Always renegotiate the link to
12079 * inform our link partner of our
12080 * flow control settings, even if the
12081 * flow control is forced. Let
12082 * tg3_adjust_link() do the final
12083 * flow control setup.
12085 return phy_start_aneg(phydev);
12089 if (!epause->autoneg)
12090 tg3_setup_flow_control(tp, 0, 0);
12092 tp->link_config.advertising &=
12093 ~(ADVERTISED_Pause |
12094 ADVERTISED_Asym_Pause);
12095 tp->link_config.advertising |= newadv;
12100 if (netif_running(dev)) {
12101 tg3_netif_stop(tp);
12105 tg3_full_lock(tp, irq_sync);
12107 if (epause->autoneg)
12108 tg3_flag_set(tp, PAUSE_AUTONEG);
12110 tg3_flag_clear(tp, PAUSE_AUTONEG);
12111 if (epause->rx_pause)
12112 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12114 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12115 if (epause->tx_pause)
12116 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12118 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12120 if (netif_running(dev)) {
12121 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12122 err = tg3_restart_hw(tp, false);
12124 tg3_netif_start(tp);
12127 tg3_full_unlock(tp);
12130 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12135 static int tg3_get_sset_count(struct net_device *dev, int sset)
12139 return TG3_NUM_TEST;
12141 return TG3_NUM_STATS;
12143 return -EOPNOTSUPP;
12147 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12148 u32 *rules __always_unused)
12150 struct tg3 *tp = netdev_priv(dev);
12152 if (!tg3_flag(tp, SUPPORT_MSIX))
12153 return -EOPNOTSUPP;
12155 switch (info->cmd) {
12156 case ETHTOOL_GRXRINGS:
12157 if (netif_running(tp->dev))
12158 info->data = tp->rxq_cnt;
12160 info->data = num_online_cpus();
12161 if (info->data > TG3_RSS_MAX_NUM_QS)
12162 info->data = TG3_RSS_MAX_NUM_QS;
12165 /* The first interrupt vector only
12166 * handles link interrupts.
12172 return -EOPNOTSUPP;
12176 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12179 struct tg3 *tp = netdev_priv(dev);
12181 if (tg3_flag(tp, SUPPORT_MSIX))
12182 size = TG3_RSS_INDIR_TBL_SIZE;
12187 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12189 struct tg3 *tp = netdev_priv(dev);
12192 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12193 indir[i] = tp->rss_ind_tbl[i];
12198 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12200 struct tg3 *tp = netdev_priv(dev);
12203 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12204 tp->rss_ind_tbl[i] = indir[i];
12206 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12209 /* It is legal to write the indirection
12210 * table while the device is running.
12212 tg3_full_lock(tp, 0);
12213 tg3_rss_write_indir_tbl(tp);
12214 tg3_full_unlock(tp);
12219 static void tg3_get_channels(struct net_device *dev,
12220 struct ethtool_channels *channel)
12222 struct tg3 *tp = netdev_priv(dev);
12223 u32 deflt_qs = netif_get_num_default_rss_queues();
12225 channel->max_rx = tp->rxq_max;
12226 channel->max_tx = tp->txq_max;
12228 if (netif_running(dev)) {
12229 channel->rx_count = tp->rxq_cnt;
12230 channel->tx_count = tp->txq_cnt;
12233 channel->rx_count = tp->rxq_req;
12235 channel->rx_count = min(deflt_qs, tp->rxq_max);
12238 channel->tx_count = tp->txq_req;
12240 channel->tx_count = min(deflt_qs, tp->txq_max);
12244 static int tg3_set_channels(struct net_device *dev,
12245 struct ethtool_channels *channel)
12247 struct tg3 *tp = netdev_priv(dev);
12249 if (!tg3_flag(tp, SUPPORT_MSIX))
12250 return -EOPNOTSUPP;
12252 if (channel->rx_count > tp->rxq_max ||
12253 channel->tx_count > tp->txq_max)
12256 tp->rxq_req = channel->rx_count;
12257 tp->txq_req = channel->tx_count;
12259 if (!netif_running(dev))
12264 tg3_carrier_off(tp);
12266 tg3_start(tp, true, false, false);
12271 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12273 switch (stringset) {
12275 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12278 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12281 WARN_ON(1); /* we need a WARN() */
12286 static int tg3_set_phys_id(struct net_device *dev,
12287 enum ethtool_phys_id_state state)
12289 struct tg3 *tp = netdev_priv(dev);
12291 if (!netif_running(tp->dev))
12295 case ETHTOOL_ID_ACTIVE:
12296 return 1; /* cycle on/off once per second */
12298 case ETHTOOL_ID_ON:
12299 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12300 LED_CTRL_1000MBPS_ON |
12301 LED_CTRL_100MBPS_ON |
12302 LED_CTRL_10MBPS_ON |
12303 LED_CTRL_TRAFFIC_OVERRIDE |
12304 LED_CTRL_TRAFFIC_BLINK |
12305 LED_CTRL_TRAFFIC_LED);
12308 case ETHTOOL_ID_OFF:
12309 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12310 LED_CTRL_TRAFFIC_OVERRIDE);
12313 case ETHTOOL_ID_INACTIVE:
12314 tw32(MAC_LED_CTRL, tp->led_ctrl);
12321 static void tg3_get_ethtool_stats(struct net_device *dev,
12322 struct ethtool_stats *estats, u64 *tmp_stats)
12324 struct tg3 *tp = netdev_priv(dev);
12327 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12329 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12332 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12336 u32 offset = 0, len = 0;
12339 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12342 if (magic == TG3_EEPROM_MAGIC) {
12343 for (offset = TG3_NVM_DIR_START;
12344 offset < TG3_NVM_DIR_END;
12345 offset += TG3_NVM_DIRENT_SIZE) {
12346 if (tg3_nvram_read(tp, offset, &val))
12349 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12350 TG3_NVM_DIRTYPE_EXTVPD)
12354 if (offset != TG3_NVM_DIR_END) {
12355 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12356 if (tg3_nvram_read(tp, offset + 4, &offset))
12359 offset = tg3_nvram_logical_addr(tp, offset);
12363 if (!offset || !len) {
12364 offset = TG3_NVM_VPD_OFF;
12365 len = TG3_NVM_VPD_LEN;
12368 buf = kmalloc(len, GFP_KERNEL);
12372 if (magic == TG3_EEPROM_MAGIC) {
12373 for (i = 0; i < len; i += 4) {
12374 /* The data is in little-endian format in NVRAM.
12375 * Use the big-endian read routines to preserve
12376 * the byte order as it exists in NVRAM.
12378 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12384 unsigned int pos = 0;
12386 ptr = (u8 *)&buf[0];
12387 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12388 cnt = pci_read_vpd(tp->pdev, pos,
12390 if (cnt == -ETIMEDOUT || cnt == -EINTR)
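/* tg3_vpd_readblock() finds the VPD image in one of three places: an
 * extended-VPD entry in the NVRAM directory, the default
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window in NVRAM, or (for parts
 * without the EEPROM magic) the PCI VPD capability via pci_read_vpd().
 */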
12408 #define NVRAM_TEST_SIZE 0x100
12409 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12410 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12411 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12412 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12413 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12414 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12415 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12416 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12418 static int tg3_test_nvram(struct tg3 *tp)
12420 u32 csum, magic, len;
12422 int i, j, k, err = 0, size;
12424 if (tg3_flag(tp, NO_NVRAM))
12427 if (tg3_nvram_read(tp, 0, &magic) != 0)
12430 if (magic == TG3_EEPROM_MAGIC)
12431 size = NVRAM_TEST_SIZE;
12432 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12433 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12434 TG3_EEPROM_SB_FORMAT_1) {
12435 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12436 case TG3_EEPROM_SB_REVISION_0:
12437 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12439 case TG3_EEPROM_SB_REVISION_2:
12440 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12442 case TG3_EEPROM_SB_REVISION_3:
12443 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12445 case TG3_EEPROM_SB_REVISION_4:
12446 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12448 case TG3_EEPROM_SB_REVISION_5:
12449 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12451 case TG3_EEPROM_SB_REVISION_6:
12452 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12459 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12460 size = NVRAM_SELFBOOT_HW_SIZE;
12464 buf = kmalloc(size, GFP_KERNEL);
12469 for (i = 0, j = 0; i < size; i += 4, j++) {
12470 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12477 /* Selfboot format */
12478 magic = be32_to_cpu(buf[0]);
12479 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12480 TG3_EEPROM_MAGIC_FW) {
12481 u8 *buf8 = (u8 *) buf, csum8 = 0;
12483 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12484 TG3_EEPROM_SB_REVISION_2) {
12485 /* For rev 2, the csum doesn't include the MBA. */
12486 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12488 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12491 for (i = 0; i < size; i++)
12504 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12505 TG3_EEPROM_MAGIC_HW) {
12506 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12507 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12508 u8 *buf8 = (u8 *) buf;
12510 /* Separate the parity bits and the data bytes. */
12511 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12512 if ((i == 0) || (i == 8)) {
12516 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12517 parity[k++] = buf8[i] & msk;
12519 } else if (i == 16) {
12523 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12524 parity[k++] = buf8[i] & msk;
12527 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12528 parity[k++] = buf8[i] & msk;
12531 data[j++] = buf8[i];
12535 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12536 u8 hw8 = hweight8(data[i]);
12538 if ((hw8 & 0x1) && parity[i])
12540 else if (!(hw8 & 0x1) && !parity[i])
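/* A byte passes only if it and its stored parity bit have odd combined
 * weight: an odd-weight byte with the parity bit set, or an even-weight
 * byte with it clear, fails the NVRAM selfboot test.
 */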
12549 /* Bootstrap checksum at offset 0x10 */
12550 csum = calc_crc((unsigned char *) buf, 0x10);
12551 if (csum != le32_to_cpu(buf[0x10/4]))
12554 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12555 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12556 if (csum != le32_to_cpu(buf[0xfc/4]))
12561 buf = tg3_vpd_readblock(tp, &len);
12565 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12567 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12571 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12574 i += PCI_VPD_LRDT_TAG_SIZE;
12575 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12576 PCI_VPD_RO_KEYWORD_CHKSUM);
12580 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12582 for (i = 0; i <= j; i++)
12583 csum8 += ((u8 *)buf)[i];
12597 #define TG3_SERDES_TIMEOUT_SEC 2
12598 #define TG3_COPPER_TIMEOUT_SEC 6
12600 static int tg3_test_link(struct tg3 *tp)
12604 if (!netif_running(tp->dev))
12607 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12608 max = TG3_SERDES_TIMEOUT_SEC;
12610 max = TG3_COPPER_TIMEOUT_SEC;
12612 for (i = 0; i < max; i++) {
12616 if (msleep_interruptible(1000))
12623 /* Only test the commonly used registers */
12624 static int tg3_test_registers(struct tg3 *tp)
12626 int i, is_5705, is_5750;
12627 u32 offset, read_mask, write_mask, val, save_val, read_val;
12631 #define TG3_FL_5705 0x1
12632 #define TG3_FL_NOT_5705 0x2
12633 #define TG3_FL_NOT_5788 0x4
12634 #define TG3_FL_NOT_5750 0x8
12638 /* MAC Control Registers */
12639 { MAC_MODE, TG3_FL_NOT_5705,
12640 0x00000000, 0x00ef6f8c },
12641 { MAC_MODE, TG3_FL_5705,
12642 0x00000000, 0x01ef6b8c },
12643 { MAC_STATUS, TG3_FL_NOT_5705,
12644 0x03800107, 0x00000000 },
12645 { MAC_STATUS, TG3_FL_5705,
12646 0x03800100, 0x00000000 },
12647 { MAC_ADDR_0_HIGH, 0x0000,
12648 0x00000000, 0x0000ffff },
12649 { MAC_ADDR_0_LOW, 0x0000,
12650 0x00000000, 0xffffffff },
12651 { MAC_RX_MTU_SIZE, 0x0000,
12652 0x00000000, 0x0000ffff },
12653 { MAC_TX_MODE, 0x0000,
12654 0x00000000, 0x00000070 },
12655 { MAC_TX_LENGTHS, 0x0000,
12656 0x00000000, 0x00003fff },
12657 { MAC_RX_MODE, TG3_FL_NOT_5705,
12658 0x00000000, 0x000007fc },
12659 { MAC_RX_MODE, TG3_FL_5705,
12660 0x00000000, 0x000007dc },
12661 { MAC_HASH_REG_0, 0x0000,
12662 0x00000000, 0xffffffff },
12663 { MAC_HASH_REG_1, 0x0000,
12664 0x00000000, 0xffffffff },
12665 { MAC_HASH_REG_2, 0x0000,
12666 0x00000000, 0xffffffff },
12667 { MAC_HASH_REG_3, 0x0000,
12668 0x00000000, 0xffffffff },
12670 /* Receive Data and Receive BD Initiator Control Registers. */
12671 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12672 0x00000000, 0xffffffff },
12673 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12674 0x00000000, 0xffffffff },
12675 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12676 0x00000000, 0x00000003 },
12677 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12678 0x00000000, 0xffffffff },
12679 { RCVDBDI_STD_BD+0, 0x0000,
12680 0x00000000, 0xffffffff },
12681 { RCVDBDI_STD_BD+4, 0x0000,
12682 0x00000000, 0xffffffff },
12683 { RCVDBDI_STD_BD+8, 0x0000,
12684 0x00000000, 0xffff0002 },
12685 { RCVDBDI_STD_BD+0xc, 0x0000,
12686 0x00000000, 0xffffffff },
12688 /* Receive BD Initiator Control Registers. */
12689 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12690 0x00000000, 0xffffffff },
12691 { RCVBDI_STD_THRESH, TG3_FL_5705,
12692 0x00000000, 0x000003ff },
12693 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12694 0x00000000, 0xffffffff },
12696 /* Host Coalescing Control Registers. */
12697 { HOSTCC_MODE, TG3_FL_NOT_5705,
12698 0x00000000, 0x00000004 },
12699 { HOSTCC_MODE, TG3_FL_5705,
12700 0x00000000, 0x000000f6 },
12701 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12702 0x00000000, 0xffffffff },
12703 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12704 0x00000000, 0x000003ff },
12705 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12706 0x00000000, 0xffffffff },
12707 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12708 0x00000000, 0x000003ff },
12709 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12710 0x00000000, 0xffffffff },
12711 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12712 0x00000000, 0x000000ff },
12713 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12714 0x00000000, 0xffffffff },
12715 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12716 0x00000000, 0x000000ff },
12717 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12718 0x00000000, 0xffffffff },
12719 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12720 0x00000000, 0xffffffff },
12721 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12722 0x00000000, 0xffffffff },
12723 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12724 0x00000000, 0x000000ff },
12725 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12726 0x00000000, 0xffffffff },
12727 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12728 0x00000000, 0x000000ff },
12729 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12730 0x00000000, 0xffffffff },
12731 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12732 0x00000000, 0xffffffff },
12733 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12734 0x00000000, 0xffffffff },
12735 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12736 0x00000000, 0xffffffff },
12737 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12738 0x00000000, 0xffffffff },
12739 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12740 0xffffffff, 0x00000000 },
12741 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12742 0xffffffff, 0x00000000 },
12744 /* Buffer Manager Control Registers. */
12745 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12746 0x00000000, 0x007fff80 },
12747 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12748 0x00000000, 0x007fffff },
12749 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12750 0x00000000, 0x0000003f },
12751 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12752 0x00000000, 0x000001ff },
12753 { BUFMGR_MB_HIGH_WATER, 0x0000,
12754 0x00000000, 0x000001ff },
12755 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12756 0xffffffff, 0x00000000 },
12757 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12758 0xffffffff, 0x00000000 },
12760 /* Mailbox Registers */
12761 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12762 0x00000000, 0x000001ff },
12763 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12764 0x00000000, 0x000001ff },
12765 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12766 0x00000000, 0x000007ff },
12767 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12768 0x00000000, 0x000001ff },
12770 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
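/* Table entries are { offset, flags, read-only mask, read/write mask },
 * terminated by an offset of 0xffff.  The flags gate entries to the
 * 5705/5750/5788 chip classes checked in the loop below.
 */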
12773 is_5705 = is_5750 = 0;
12774 if (tg3_flag(tp, 5705_PLUS)) {
12776 if (tg3_flag(tp, 5750_PLUS))
12780 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12781 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12784 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12787 if (tg3_flag(tp, IS_5788) &&
12788 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12791 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12794 offset = (u32) reg_tbl[i].offset;
12795 read_mask = reg_tbl[i].read_mask;
12796 write_mask = reg_tbl[i].write_mask;
12798 /* Save the original register content */
12799 save_val = tr32(offset);
12801 /* Determine the read-only value. */
12802 read_val = save_val & read_mask;
12804 /* Write zero to the register, then make sure the read-only bits
12805 * are not changed and the read/write bits are all zeros.
12809 val = tr32(offset);
12811 /* Test the read-only and read/write bits. */
12812 if (((val & read_mask) != read_val) || (val & write_mask))
12815 /* Write ones to all the bits defined by RdMask and WrMask, then
12816 * make sure the read-only bits are not changed and the
12817 * read/write bits are all ones.
12819 tw32(offset, read_mask | write_mask);
12821 val = tr32(offset);
12823 /* Test the read-only bits. */
12824 if ((val & read_mask) != read_val)
12827 /* Test the read/write bits. */
12828 if ((val & write_mask) != write_mask)
12831 tw32(offset, save_val);
12837 if (netif_msg_hw(tp))
12838 netdev_err(tp->dev,
12839 "Register test failed at offset %x\n", offset);
12840 tw32(offset, save_val);
12844 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12846 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12850 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12851 for (j = 0; j < len; j += 4) {
12854 tg3_write_mem(tp, offset + j, test_pattern[i]);
12855 tg3_read_mem(tp, offset + j, &val);
12856 if (val != test_pattern[i])
12863 static int tg3_test_memory(struct tg3 *tp)
12865 static struct mem_entry {
12868 } mem_tbl_570x[] = {
12869 { 0x00000000, 0x00b50},
12870 { 0x00002000, 0x1c000},
12871 { 0xffffffff, 0x00000}
12872 }, mem_tbl_5705[] = {
12873 { 0x00000100, 0x0000c},
12874 { 0x00000200, 0x00008},
12875 { 0x00004000, 0x00800},
12876 { 0x00006000, 0x01000},
12877 { 0x00008000, 0x02000},
12878 { 0x00010000, 0x0e000},
12879 { 0xffffffff, 0x00000}
12880 }, mem_tbl_5755[] = {
12881 { 0x00000200, 0x00008},
12882 { 0x00004000, 0x00800},
12883 { 0x00006000, 0x00800},
12884 { 0x00008000, 0x02000},
12885 { 0x00010000, 0x0c000},
12886 { 0xffffffff, 0x00000}
12887 }, mem_tbl_5906[] = {
12888 { 0x00000200, 0x00008},
12889 { 0x00004000, 0x00400},
12890 { 0x00006000, 0x00400},
12891 { 0x00008000, 0x01000},
12892 { 0x00010000, 0x01000},
12893 { 0xffffffff, 0x00000}
12894 }, mem_tbl_5717[] = {
12895 { 0x00000200, 0x00008},
12896 { 0x00010000, 0x0a000},
12897 { 0x00020000, 0x13c00},
12898 { 0xffffffff, 0x00000}
12899 }, mem_tbl_57765[] = {
12900 { 0x00000200, 0x00008},
12901 { 0x00004000, 0x00800},
12902 { 0x00006000, 0x09800},
12903 { 0x00010000, 0x0a000},
12904 { 0xffffffff, 0x00000}
12906 struct mem_entry *mem_tbl;
12910 if (tg3_flag(tp, 5717_PLUS))
12911 mem_tbl = mem_tbl_5717;
12912 else if (tg3_flag(tp, 57765_CLASS) ||
12913 tg3_asic_rev(tp) == ASIC_REV_5762)
12914 mem_tbl = mem_tbl_57765;
12915 else if (tg3_flag(tp, 5755_PLUS))
12916 mem_tbl = mem_tbl_5755;
12917 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12918 mem_tbl = mem_tbl_5906;
12919 else if (tg3_flag(tp, 5705_PLUS))
12920 mem_tbl = mem_tbl_5705;
12922 mem_tbl = mem_tbl_570x;
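/* Each table lists { internal NIC memory offset, length } regions to
 * pattern-test, terminated by offset 0xffffffff; the table matching
 * the ASIC generation's memory map is selected above.
 */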
12924 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12925 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12933 #define TG3_TSO_MSS 500
12935 #define TG3_TSO_IP_HDR_LEN 20
12936 #define TG3_TSO_TCP_HDR_LEN 20
12937 #define TG3_TSO_TCP_OPT_LEN 12
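/* Canned header used by the TSO loopback test: EtherType plus IPv4 and
 * TCP headers (with TG3_TSO_TCP_OPT_LEN bytes of options), copied into
 * the test frame right after the two MAC addresses.
 */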
12939 static const u8 tg3_tso_header[] = {
12941 0x45, 0x00, 0x00, 0x00,
12942 0x00, 0x00, 0x40, 0x00,
12943 0x40, 0x06, 0x00, 0x00,
12944 0x0a, 0x00, 0x00, 0x01,
12945 0x0a, 0x00, 0x00, 0x02,
12946 0x0d, 0x00, 0xe0, 0x00,
12947 0x00, 0x00, 0x01, 0x00,
12948 0x00, 0x00, 0x02, 0x00,
12949 0x80, 0x10, 0x10, 0x00,
12950 0x14, 0x09, 0x00, 0x00,
12951 0x01, 0x01, 0x08, 0x0a,
12952 0x11, 0x11, 0x11, 0x11,
12953 0x11, 0x11, 0x11, 0x11,
12956 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12958 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12959 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12961 struct sk_buff *skb;
12962 u8 *tx_data, *rx_data;
12964 int num_pkts, tx_len, rx_len, i, err;
12965 struct tg3_rx_buffer_desc *desc;
12966 struct tg3_napi *tnapi, *rnapi;
12967 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12969 tnapi = &tp->napi[0];
12970 rnapi = &tp->napi[0];
12971 if (tp->irq_cnt > 1) {
12972 if (tg3_flag(tp, ENABLE_RSS))
12973 rnapi = &tp->napi[1];
12974 if (tg3_flag(tp, ENABLE_TSS))
12975 tnapi = &tp->napi[1];
12977 coal_now = tnapi->coal_now | rnapi->coal_now;
12982 skb = netdev_alloc_skb(tp->dev, tx_len);
12986 tx_data = skb_put(skb, tx_len);
12987 memcpy(tx_data, tp->dev->dev_addr, 6);
12988 memset(tx_data + 6, 0x0, 8);
12990 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12992 if (tso_loopback) {
12993 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12995 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12996 TG3_TSO_TCP_OPT_LEN;
12998 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12999 sizeof(tg3_tso_header));
13002 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13003 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13005 /* Set the total length field in the IP header */
13006 iph->tot_len = htons((u16)(mss + hdr_len));
13008 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13009 TXD_FLAG_CPU_POST_DMA);
13011 if (tg3_flag(tp, HW_TSO_1) ||
13012 tg3_flag(tp, HW_TSO_2) ||
13013 tg3_flag(tp, HW_TSO_3)) {
13015 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13016 th = (struct tcphdr *)&tx_data[val];
13019 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13021 if (tg3_flag(tp, HW_TSO_3)) {
13022 mss |= (hdr_len & 0xc) << 12;
13023 if (hdr_len & 0x10)
13024 base_flags |= 0x00000010;
13025 base_flags |= (hdr_len & 0x3e0) << 5;
13026 } else if (tg3_flag(tp, HW_TSO_2))
13027 mss |= hdr_len << 9;
13028 else if (tg3_flag(tp, HW_TSO_1) ||
13029 tg3_asic_rev(tp) == ASIC_REV_5705) {
13030 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13032 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13035 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13038 data_off = ETH_HLEN;
13040 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13041 tx_len > VLAN_ETH_FRAME_LEN)
13042 base_flags |= TXD_FLAG_JMB_PKT;
13045 for (i = data_off; i < tx_len; i++)
13046 tx_data[i] = (u8) (i & 0xff);
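/* Fill the payload with a simple counting pattern so the receive side
 * of the loopback test can verify every byte that comes back.
 */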
13048 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13049 if (pci_dma_mapping_error(tp->pdev, map)) {
13050 dev_kfree_skb(skb);
13054 val = tnapi->tx_prod;
13055 tnapi->tx_buffers[val].skb = skb;
13056 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13058 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13063 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13065 budget = tg3_tx_avail(tnapi);
13066 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13067 base_flags | TXD_FLAG_END, mss, 0)) {
13068 tnapi->tx_buffers[val].skb = NULL;
13069 dev_kfree_skb(skb);
13075 /* Sync BD data before updating mailbox */
13078 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13079 tr32_mailbox(tnapi->prodmbox);
13083 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13084 for (i = 0; i < 35; i++) {
13085 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13090 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13091 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13092 if ((tx_idx == tnapi->tx_prod) &&
13093 (rx_idx == (rx_start_idx + num_pkts)))
13097 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13098 dev_kfree_skb(skb);
13100 if (tx_idx != tnapi->tx_prod)
13103 if (rx_idx != rx_start_idx + num_pkts)
13107 while (rx_idx != rx_start_idx) {
13108 desc = &rnapi->rx_rcb[rx_start_idx++];
13109 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13110 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13112 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13113 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13116 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13119 if (!tso_loopback) {
13120 if (rx_len != tx_len)
13123 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13124 if (opaque_key != RXD_OPAQUE_RING_STD)
13127 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13130 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13131 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13132 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13136 if (opaque_key == RXD_OPAQUE_RING_STD) {
13137 rx_data = tpr->rx_std_buffers[desc_idx].data;
13138 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13140 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13141 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13142 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13147 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13148 PCI_DMA_FROMDEVICE);
13150 rx_data += TG3_RX_OFFSET(tp);
13151 for (i = data_off; i < rx_len; i++, val++) {
13152 if (*(rx_data + i) != (u8) (val & 0xff))
13159 /* tg3_free_rings will unmap and free the rx_data */
13164 #define TG3_STD_LOOPBACK_FAILED 1
13165 #define TG3_JMB_LOOPBACK_FAILED 2
13166 #define TG3_TSO_LOOPBACK_FAILED 4
13167 #define TG3_LOOPBACK_FAILED \
13168 (TG3_STD_LOOPBACK_FAILED | \
13169 TG3_JMB_LOOPBACK_FAILED | \
13170 TG3_TSO_LOOPBACK_FAILED)
13172 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13176 u32 jmb_pkt_sz = 9000;
13179 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13181 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13182 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13184 if (!netif_running(tp->dev)) {
13185 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13186 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13188 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13192 err = tg3_reset_hw(tp, true);
13194 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13195 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13197 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13201 if (tg3_flag(tp, ENABLE_RSS)) {
13204 /* Reroute all rx packets to the 1st queue */
13205 for (i = MAC_RSS_INDIR_TBL_0;
13206 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13210 /* HW errata - mac loopback fails in some cases on 5780.
13211 * Normal traffic and PHY loopback are not affected by
13212 * errata. Also, the MAC loopback test is deprecated for
13213 * all newer ASIC revisions.
13215 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13216 !tg3_flag(tp, CPMU_PRESENT)) {
13217 tg3_mac_loopback(tp, true);
13219 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13220 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13222 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13223 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13224 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13226 tg3_mac_loopback(tp, false);
13229 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13230 !tg3_flag(tp, USE_PHYLIB)) {
13233 tg3_phy_lpbk_set(tp, 0, false);
13235 /* Wait for link */
13236 for (i = 0; i < 100; i++) {
13237 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13242 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13243 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13244 if (tg3_flag(tp, TSO_CAPABLE) &&
13245 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13246 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13247 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13248 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13249 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13252 tg3_phy_lpbk_set(tp, 0, true);
13254 /* All link indications report up, but the hardware
13255 * isn't really ready for about 20 msec. Double it
13260 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13261 data[TG3_EXT_LOOPB_TEST] |=
13262 TG3_STD_LOOPBACK_FAILED;
13263 if (tg3_flag(tp, TSO_CAPABLE) &&
13264 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13265 data[TG3_EXT_LOOPB_TEST] |=
13266 TG3_TSO_LOOPBACK_FAILED;
13267 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13268 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13269 data[TG3_EXT_LOOPB_TEST] |=
13270 TG3_JMB_LOOPBACK_FAILED;
13273 /* Re-enable gphy autopowerdown. */
13274 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13275 tg3_phy_toggle_apd(tp, true);
13278 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13279 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13282 tp->phy_flags |= eee_cap;
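/* Restore the EEE capability bit that was saved and masked off at the
 * top of the function before the loopback runs.
 */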
13287 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13290 struct tg3 *tp = netdev_priv(dev);
13291 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13293 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13294 tg3_power_up(tp)) {
13295 etest->flags |= ETH_TEST_FL_FAILED;
13296 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13300 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13302 if (tg3_test_nvram(tp) != 0) {
13303 etest->flags |= ETH_TEST_FL_FAILED;
13304 data[TG3_NVRAM_TEST] = 1;
13306 if (!doextlpbk && tg3_test_link(tp)) {
13307 etest->flags |= ETH_TEST_FL_FAILED;
13308 data[TG3_LINK_TEST] = 1;
13310 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13311 int err, err2 = 0, irq_sync = 0;
13313 if (netif_running(dev)) {
13315 tg3_netif_stop(tp);
13319 tg3_full_lock(tp, irq_sync);
13320 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13321 err = tg3_nvram_lock(tp);
13322 tg3_halt_cpu(tp, RX_CPU_BASE);
13323 if (!tg3_flag(tp, 5705_PLUS))
13324 tg3_halt_cpu(tp, TX_CPU_BASE);
13326 tg3_nvram_unlock(tp);
13328 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13331 if (tg3_test_registers(tp) != 0) {
13332 etest->flags |= ETH_TEST_FL_FAILED;
13333 data[TG3_REGISTER_TEST] = 1;
13336 if (tg3_test_memory(tp) != 0) {
13337 etest->flags |= ETH_TEST_FL_FAILED;
13338 data[TG3_MEMORY_TEST] = 1;
13342 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13344 if (tg3_test_loopback(tp, data, doextlpbk))
13345 etest->flags |= ETH_TEST_FL_FAILED;
13347 tg3_full_unlock(tp);
13349 if (tg3_test_interrupt(tp) != 0) {
13350 etest->flags |= ETH_TEST_FL_FAILED;
13351 data[TG3_INTERRUPT_TEST] = 1;
13354 tg3_full_lock(tp, 0);
13356 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13357 if (netif_running(dev)) {
13358 tg3_flag_set(tp, INIT_COMPLETE);
13359 err2 = tg3_restart_hw(tp, true);
13361 tg3_netif_start(tp);
13364 tg3_full_unlock(tp);
13366 if (irq_sync && !err2)
13369 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13370 tg3_power_down(tp);
13374 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13375 struct ifreq *ifr, int cmd)
13377 struct tg3 *tp = netdev_priv(dev);
13378 struct hwtstamp_config stmpconf;
13380 if (!tg3_flag(tp, PTP_CAPABLE))
13383 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13386 if (stmpconf.flags)
13389 switch (stmpconf.tx_type) {
13390 case HWTSTAMP_TX_ON:
13391 tg3_flag_set(tp, TX_TSTAMP_EN);
13393 case HWTSTAMP_TX_OFF:
13394 tg3_flag_clear(tp, TX_TSTAMP_EN);
13400 switch (stmpconf.rx_filter) {
13401 case HWTSTAMP_FILTER_NONE:
13404 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13405 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13406 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13408 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13409 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13410 TG3_RX_PTP_CTL_SYNC_EVNT;
13412 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13413 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13414 TG3_RX_PTP_CTL_DELAY_REQ;
13416 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13417 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13418 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13420 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13421 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13422 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13424 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13425 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13426 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13428 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13429 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13430 TG3_RX_PTP_CTL_SYNC_EVNT;
13432 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13433 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13434 TG3_RX_PTP_CTL_SYNC_EVNT;
13436 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13437 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13438 TG3_RX_PTP_CTL_SYNC_EVNT;
13440 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13441 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13442 TG3_RX_PTP_CTL_DELAY_REQ;
13444 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13445 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13446 TG3_RX_PTP_CTL_DELAY_REQ;
13448 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13449 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13450 TG3_RX_PTP_CTL_DELAY_REQ;
13456 if (netif_running(dev) && tp->rxptpctl)
13457 tw32(TG3_RX_PTP_CTL,
13458 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13460 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
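/* The selected PTP filter is programmed into TG3_RX_PTP_CTL only while
 * the interface is running, and the accepted configuration is echoed
 * back to user space as the hwtstamp ioctl convention requires.
 */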
13464 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13466 struct mii_ioctl_data *data = if_mii(ifr);
13467 struct tg3 *tp = netdev_priv(dev);
13470 if (tg3_flag(tp, USE_PHYLIB)) {
13471 struct phy_device *phydev;
13472 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13474 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13475 return phy_mii_ioctl(phydev, ifr, cmd);
13480 data->phy_id = tp->phy_addr;
13483 case SIOCGMIIREG: {
13486 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13487 break; /* We have no PHY */
13489 if (!netif_running(dev))
13492 spin_lock_bh(&tp->lock);
13493 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13494 data->reg_num & 0x1f, &mii_regval);
13495 spin_unlock_bh(&tp->lock);
13497 data->val_out = mii_regval;
13503 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13504 break; /* We have no PHY */
13506 if (!netif_running(dev))
13509 spin_lock_bh(&tp->lock);
13510 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13511 data->reg_num & 0x1f, data->val_in);
13512 spin_unlock_bh(&tp->lock);
13516 case SIOCSHWTSTAMP:
13517 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13523 return -EOPNOTSUPP;
13526 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13528 struct tg3 *tp = netdev_priv(dev);
13530 memcpy(ec, &tp->coal, sizeof(*ec));
13534 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13536 struct tg3 *tp = netdev_priv(dev);
13537 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13538 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13540 if (!tg3_flag(tp, 5705_PLUS)) {
13541 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13542 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13543 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13544 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
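/* On 5705-class and newer chips the driver does not support the
 * per-IRQ coalescing ticks or the statistics block timer, so their
 * limits stay at zero and any nonzero request is rejected by the range
 * checks below.
 */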
13547 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13548 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13549 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13550 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13551 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13552 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13553 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13554 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13555 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13556 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13559 /* No rx interrupts will be generated if both are zero */
13560 if ((ec->rx_coalesce_usecs == 0) &&
13561 (ec->rx_max_coalesced_frames == 0))
13564 /* No tx interrupts will be generated if both are zero */
13565 if ((ec->tx_coalesce_usecs == 0) &&
13566 (ec->tx_max_coalesced_frames == 0))
13569 /* Only copy relevant parameters, ignore all others. */
13570 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13571 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13572 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13573 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13574 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13575 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13576 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13577 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13578 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13580 if (netif_running(dev)) {
13581 tg3_full_lock(tp, 0);
13582 __tg3_set_coalesce(tp, &tp->coal);
13583 tg3_full_unlock(tp);
13588 static const struct ethtool_ops tg3_ethtool_ops = {
13589 .get_settings = tg3_get_settings,
13590 .set_settings = tg3_set_settings,
13591 .get_drvinfo = tg3_get_drvinfo,
13592 .get_regs_len = tg3_get_regs_len,
13593 .get_regs = tg3_get_regs,
13594 .get_wol = tg3_get_wol,
13595 .set_wol = tg3_set_wol,
13596 .get_msglevel = tg3_get_msglevel,
13597 .set_msglevel = tg3_set_msglevel,
13598 .nway_reset = tg3_nway_reset,
13599 .get_link = ethtool_op_get_link,
13600 .get_eeprom_len = tg3_get_eeprom_len,
13601 .get_eeprom = tg3_get_eeprom,
13602 .set_eeprom = tg3_set_eeprom,
13603 .get_ringparam = tg3_get_ringparam,
13604 .set_ringparam = tg3_set_ringparam,
13605 .get_pauseparam = tg3_get_pauseparam,
13606 .set_pauseparam = tg3_set_pauseparam,
13607 .self_test = tg3_self_test,
13608 .get_strings = tg3_get_strings,
13609 .set_phys_id = tg3_set_phys_id,
13610 .get_ethtool_stats = tg3_get_ethtool_stats,
13611 .get_coalesce = tg3_get_coalesce,
13612 .set_coalesce = tg3_set_coalesce,
13613 .get_sset_count = tg3_get_sset_count,
13614 .get_rxnfc = tg3_get_rxnfc,
13615 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13616 .get_rxfh_indir = tg3_get_rxfh_indir,
13617 .set_rxfh_indir = tg3_set_rxfh_indir,
13618 .get_channels = tg3_get_channels,
13619 .set_channels = tg3_set_channels,
13620 .get_ts_info = tg3_get_ts_info,
13623 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13624 struct rtnl_link_stats64 *stats)
13626 struct tg3 *tp = netdev_priv(dev);
13628 spin_lock_bh(&tp->lock);
13629 if (!tp->hw_stats) {
13630 spin_unlock_bh(&tp->lock);
13631 return &tp->net_stats_prev;
13634 tg3_get_nstats(tp, stats);
13635 spin_unlock_bh(&tp->lock);
13640 static void tg3_set_rx_mode(struct net_device *dev)
13642 struct tg3 *tp = netdev_priv(dev);
13644 if (!netif_running(dev))
13647 tg3_full_lock(tp, 0);
13648 __tg3_set_rx_mode(dev);
13649 tg3_full_unlock(tp);
13652 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13655 dev->mtu = new_mtu;
13657 if (new_mtu > ETH_DATA_LEN) {
13658 if (tg3_flag(tp, 5780_CLASS)) {
13659 netdev_update_features(dev);
13660 tg3_flag_clear(tp, TSO_CAPABLE);
13662 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13665 if (tg3_flag(tp, 5780_CLASS)) {
13666 tg3_flag_set(tp, TSO_CAPABLE);
13667 netdev_update_features(dev);
13669 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
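/* MTUs above ETH_DATA_LEN turn on the jumbo RX ring; on 5780-class
 * chips TSO is not used together with jumbo frames, so TSO_CAPABLE is
 * toggled and the feature set refreshed via netdev_update_features().
 */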
13673 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13675 struct tg3 *tp = netdev_priv(dev);
13677 bool reset_phy = false;
13679 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13682 if (!netif_running(dev)) {
13683 /* We'll just catch it later when the
13686 tg3_set_mtu(dev, tp, new_mtu);
13692 tg3_netif_stop(tp);
13694 tg3_full_lock(tp, 1);
13696 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13698 tg3_set_mtu(dev, tp, new_mtu);
13700 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13701 * breaks all requests to 256 bytes.
13703 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13706 err = tg3_restart_hw(tp, reset_phy);
13709 tg3_netif_start(tp);
13711 tg3_full_unlock(tp);
13719 static const struct net_device_ops tg3_netdev_ops = {
13720 .ndo_open = tg3_open,
13721 .ndo_stop = tg3_close,
13722 .ndo_start_xmit = tg3_start_xmit,
13723 .ndo_get_stats64 = tg3_get_stats64,
13724 .ndo_validate_addr = eth_validate_addr,
13725 .ndo_set_rx_mode = tg3_set_rx_mode,
13726 .ndo_set_mac_address = tg3_set_mac_addr,
13727 .ndo_do_ioctl = tg3_ioctl,
13728 .ndo_tx_timeout = tg3_tx_timeout,
13729 .ndo_change_mtu = tg3_change_mtu,
13730 .ndo_fix_features = tg3_fix_features,
13731 .ndo_set_features = tg3_set_features,
13732 #ifdef CONFIG_NET_POLL_CONTROLLER
13733 .ndo_poll_controller = tg3_poll_controller,
13737 static void tg3_get_eeprom_size(struct tg3 *tp)
13739 u32 cursize, val, magic;
13741 tp->nvram_size = EEPROM_CHIP_SIZE;
13743 if (tg3_nvram_read(tp, 0, &magic) != 0)
13746 if ((magic != TG3_EEPROM_MAGIC) &&
13747 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13748 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13752 * Size the chip by reading offsets at increasing powers of two.
13753 * When we encounter our validation signature, we know the addressing
13754 * has wrapped around, and thus have our chip size.
13758 while (cursize < tp->nvram_size) {
13759 if (tg3_nvram_read(tp, cursize, &val) != 0)
13768 tp->nvram_size = cursize;
13771 static void tg3_get_nvram_size(struct tg3 *tp)
13775 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13778 /* Selfboot format */
13779 if (val != TG3_EEPROM_MAGIC) {
13780 tg3_get_eeprom_size(tp);
13784 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13786 /* This is confusing. We want to operate on the
13787 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13788 * call will read from NVRAM and byteswap the data
13789 * according to the byteswapping settings for all
13790 * other register accesses. This ensures the data we
13791 * want will always reside in the lower 16-bits.
13792 * However, the data in NVRAM is in LE format, which
13793 * means the data from the NVRAM read will always be
13794 * opposite the endianness of the CPU. The 16-bit
13795 * byteswap then brings the data to CPU endianness.
13797 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13801 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13804 static void tg3_get_nvram_info(struct tg3 *tp)
13808 nvcfg1 = tr32(NVRAM_CFG1);
13809 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13810 tg3_flag_set(tp, FLASH);
13812 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13813 tw32(NVRAM_CFG1, nvcfg1);
13816 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13817 tg3_flag(tp, 5780_CLASS)) {
13818 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13819 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13820 tp->nvram_jedecnum = JEDEC_ATMEL;
13821 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13822 tg3_flag_set(tp, NVRAM_BUFFERED);
13824 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13825 tp->nvram_jedecnum = JEDEC_ATMEL;
13826 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13828 case FLASH_VENDOR_ATMEL_EEPROM:
13829 tp->nvram_jedecnum = JEDEC_ATMEL;
13830 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13831 tg3_flag_set(tp, NVRAM_BUFFERED);
13833 case FLASH_VENDOR_ST:
13834 tp->nvram_jedecnum = JEDEC_ST;
13835 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13836 tg3_flag_set(tp, NVRAM_BUFFERED);
13838 case FLASH_VENDOR_SAIFUN:
13839 tp->nvram_jedecnum = JEDEC_SAIFUN;
13840 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13842 case FLASH_VENDOR_SST_SMALL:
13843 case FLASH_VENDOR_SST_LARGE:
13844 tp->nvram_jedecnum = JEDEC_SST;
13845 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13849 tp->nvram_jedecnum = JEDEC_ATMEL;
13850 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13851 tg3_flag_set(tp, NVRAM_BUFFERED);
13855 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13857 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13858 case FLASH_5752PAGE_SIZE_256:
13859 tp->nvram_pagesize = 256;
13861 case FLASH_5752PAGE_SIZE_512:
13862 tp->nvram_pagesize = 512;
13864 case FLASH_5752PAGE_SIZE_1K:
13865 tp->nvram_pagesize = 1024;
13867 case FLASH_5752PAGE_SIZE_2K:
13868 tp->nvram_pagesize = 2048;
13870 case FLASH_5752PAGE_SIZE_4K:
13871 tp->nvram_pagesize = 4096;
13873 case FLASH_5752PAGE_SIZE_264:
13874 tp->nvram_pagesize = 264;
13876 case FLASH_5752PAGE_SIZE_528:
13877 tp->nvram_pagesize = 528;
13882 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13886 nvcfg1 = tr32(NVRAM_CFG1);
13888 /* NVRAM protection for TPM */
13889 if (nvcfg1 & (1 << 27))
13890 tg3_flag_set(tp, PROTECTED_NVRAM);
13892 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13893 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13894 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13895 tp->nvram_jedecnum = JEDEC_ATMEL;
13896 tg3_flag_set(tp, NVRAM_BUFFERED);
13898 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13899 tp->nvram_jedecnum = JEDEC_ATMEL;
13900 tg3_flag_set(tp, NVRAM_BUFFERED);
13901 tg3_flag_set(tp, FLASH);
13903 case FLASH_5752VENDOR_ST_M45PE10:
13904 case FLASH_5752VENDOR_ST_M45PE20:
13905 case FLASH_5752VENDOR_ST_M45PE40:
13906 tp->nvram_jedecnum = JEDEC_ST;
13907 tg3_flag_set(tp, NVRAM_BUFFERED);
13908 tg3_flag_set(tp, FLASH);
13912 if (tg3_flag(tp, FLASH)) {
13913 tg3_nvram_get_pagesize(tp, nvcfg1);
13915 /* For eeprom, set pagesize to maximum eeprom size */
13916 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13918 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13919 tw32(NVRAM_CFG1, nvcfg1);
13923 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13925 u32 nvcfg1, protect = 0;
13927 nvcfg1 = tr32(NVRAM_CFG1);
13929 /* NVRAM protection for TPM */
13930 if (nvcfg1 & (1 << 27)) {
13931 tg3_flag_set(tp, PROTECTED_NVRAM);
13935 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13937 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13938 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13939 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13940 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13941 tp->nvram_jedecnum = JEDEC_ATMEL;
13942 tg3_flag_set(tp, NVRAM_BUFFERED);
13943 tg3_flag_set(tp, FLASH);
13944 tp->nvram_pagesize = 264;
13945 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13946 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13947 tp->nvram_size = (protect ? 0x3e200 :
13948 TG3_NVRAM_SIZE_512KB);
13949 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13950 tp->nvram_size = (protect ? 0x1f200 :
13951 TG3_NVRAM_SIZE_256KB);
13953 tp->nvram_size = (protect ? 0x1f200 :
13954 TG3_NVRAM_SIZE_128KB);
13956 case FLASH_5752VENDOR_ST_M45PE10:
13957 case FLASH_5752VENDOR_ST_M45PE20:
13958 case FLASH_5752VENDOR_ST_M45PE40:
13959 tp->nvram_jedecnum = JEDEC_ST;
13960 tg3_flag_set(tp, NVRAM_BUFFERED);
13961 tg3_flag_set(tp, FLASH);
13962 tp->nvram_pagesize = 256;
13963 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13964 tp->nvram_size = (protect ?
13965 TG3_NVRAM_SIZE_64KB :
13966 TG3_NVRAM_SIZE_128KB);
13967 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13968 tp->nvram_size = (protect ?
13969 TG3_NVRAM_SIZE_64KB :
13970 TG3_NVRAM_SIZE_256KB);
13972 tp->nvram_size = (protect ?
13973 TG3_NVRAM_SIZE_128KB :
13974 TG3_NVRAM_SIZE_512KB);
13979 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13983 nvcfg1 = tr32(NVRAM_CFG1);
13985 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13986 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13987 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13988 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13989 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13990 tp->nvram_jedecnum = JEDEC_ATMEL;
13991 tg3_flag_set(tp, NVRAM_BUFFERED);
13992 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13994 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13995 tw32(NVRAM_CFG1, nvcfg1);
13997 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13998 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13999 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14000 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14001 tp->nvram_jedecnum = JEDEC_ATMEL;
14002 tg3_flag_set(tp, NVRAM_BUFFERED);
14003 tg3_flag_set(tp, FLASH);
14004 tp->nvram_pagesize = 264;
14006 case FLASH_5752VENDOR_ST_M45PE10:
14007 case FLASH_5752VENDOR_ST_M45PE20:
14008 case FLASH_5752VENDOR_ST_M45PE40:
14009 tp->nvram_jedecnum = JEDEC_ST;
14010 tg3_flag_set(tp, NVRAM_BUFFERED);
14011 tg3_flag_set(tp, FLASH);
14012 tp->nvram_pagesize = 256;
14017 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14019 u32 nvcfg1, protect = 0;
14021 nvcfg1 = tr32(NVRAM_CFG1);
14023 /* NVRAM protection for TPM */
14024 if (nvcfg1 & (1 << 27)) {
14025 tg3_flag_set(tp, PROTECTED_NVRAM);
14029 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14031 case FLASH_5761VENDOR_ATMEL_ADB021D:
14032 case FLASH_5761VENDOR_ATMEL_ADB041D:
14033 case FLASH_5761VENDOR_ATMEL_ADB081D:
14034 case FLASH_5761VENDOR_ATMEL_ADB161D:
14035 case FLASH_5761VENDOR_ATMEL_MDB021D:
14036 case FLASH_5761VENDOR_ATMEL_MDB041D:
14037 case FLASH_5761VENDOR_ATMEL_MDB081D:
14038 case FLASH_5761VENDOR_ATMEL_MDB161D:
14039 tp->nvram_jedecnum = JEDEC_ATMEL;
14040 tg3_flag_set(tp, NVRAM_BUFFERED);
14041 tg3_flag_set(tp, FLASH);
14042 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14043 tp->nvram_pagesize = 256;
14045 case FLASH_5761VENDOR_ST_A_M45PE20:
14046 case FLASH_5761VENDOR_ST_A_M45PE40:
14047 case FLASH_5761VENDOR_ST_A_M45PE80:
14048 case FLASH_5761VENDOR_ST_A_M45PE16:
14049 case FLASH_5761VENDOR_ST_M_M45PE20:
14050 case FLASH_5761VENDOR_ST_M_M45PE40:
14051 case FLASH_5761VENDOR_ST_M_M45PE80:
14052 case FLASH_5761VENDOR_ST_M_M45PE16:
14053 tp->nvram_jedecnum = JEDEC_ST;
14054 tg3_flag_set(tp, NVRAM_BUFFERED);
14055 tg3_flag_set(tp, FLASH);
14056 tp->nvram_pagesize = 256;
14061 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14064 case FLASH_5761VENDOR_ATMEL_ADB161D:
14065 case FLASH_5761VENDOR_ATMEL_MDB161D:
14066 case FLASH_5761VENDOR_ST_A_M45PE16:
14067 case FLASH_5761VENDOR_ST_M_M45PE16:
14068 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14070 case FLASH_5761VENDOR_ATMEL_ADB081D:
14071 case FLASH_5761VENDOR_ATMEL_MDB081D:
14072 case FLASH_5761VENDOR_ST_A_M45PE80:
14073 case FLASH_5761VENDOR_ST_M_M45PE80:
14074 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14076 case FLASH_5761VENDOR_ATMEL_ADB041D:
14077 case FLASH_5761VENDOR_ATMEL_MDB041D:
14078 case FLASH_5761VENDOR_ST_A_M45PE40:
14079 case FLASH_5761VENDOR_ST_M_M45PE40:
14080 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14082 case FLASH_5761VENDOR_ATMEL_ADB021D:
14083 case FLASH_5761VENDOR_ATMEL_MDB021D:
14084 case FLASH_5761VENDOR_ST_A_M45PE20:
14085 case FLASH_5761VENDOR_ST_M_M45PE20:
14086 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
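/* The 5906 always uses a buffered Atmel EEPROM, so no CFG1 decoding
 * is needed.
 */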
14092 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14094 tp->nvram_jedecnum = JEDEC_ATMEL;
14095 tg3_flag_set(tp, NVRAM_BUFFERED);
14096 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
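/* 57780 and 57765-class NVRAM setup: distinguish EEPROMs from Atmel
 * AT45DB and ST M45PE flash parts and size the device from the CFG1
 * vendor code.
 */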
14099 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14103 nvcfg1 = tr32(NVRAM_CFG1);
14105 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14106 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14107 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14108 tp->nvram_jedecnum = JEDEC_ATMEL;
14109 tg3_flag_set(tp, NVRAM_BUFFERED);
14110 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14112 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14113 tw32(NVRAM_CFG1, nvcfg1);
14115 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14116 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14117 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14118 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14119 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14120 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14121 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14122 tp->nvram_jedecnum = JEDEC_ATMEL;
14123 tg3_flag_set(tp, NVRAM_BUFFERED);
14124 tg3_flag_set(tp, FLASH);
14126 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14127 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14128 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14129 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14130 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14132 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14133 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14134 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14136 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14137 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14138 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14142 case FLASH_5752VENDOR_ST_M45PE10:
14143 case FLASH_5752VENDOR_ST_M45PE20:
14144 case FLASH_5752VENDOR_ST_M45PE40:
14145 tp->nvram_jedecnum = JEDEC_ST;
14146 tg3_flag_set(tp, NVRAM_BUFFERED);
14147 tg3_flag_set(tp, FLASH);
14149 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14150 case FLASH_5752VENDOR_ST_M45PE10:
14151 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14153 case FLASH_5752VENDOR_ST_M45PE20:
14154 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14156 case FLASH_5752VENDOR_ST_M45PE40:
14157 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14162 tg3_flag_set(tp, NO_NVRAM);
14166 tg3_nvram_get_pagesize(tp, nvcfg1);
14167 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14168 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
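/* 5717/5719 NVRAM setup: like the above, but some pin straps do not
 * encode a size, in which case tg3_nvram_get_size() detects it later.
 */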
14172 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14176 nvcfg1 = tr32(NVRAM_CFG1);
14178 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14179 case FLASH_5717VENDOR_ATMEL_EEPROM:
14180 case FLASH_5717VENDOR_MICRO_EEPROM:
14181 tp->nvram_jedecnum = JEDEC_ATMEL;
14182 tg3_flag_set(tp, NVRAM_BUFFERED);
14183 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14185 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14186 tw32(NVRAM_CFG1, nvcfg1);
14188 case FLASH_5717VENDOR_ATMEL_MDB011D:
14189 case FLASH_5717VENDOR_ATMEL_ADB011B:
14190 case FLASH_5717VENDOR_ATMEL_ADB011D:
14191 case FLASH_5717VENDOR_ATMEL_MDB021D:
14192 case FLASH_5717VENDOR_ATMEL_ADB021B:
14193 case FLASH_5717VENDOR_ATMEL_ADB021D:
14194 case FLASH_5717VENDOR_ATMEL_45USPT:
14195 tp->nvram_jedecnum = JEDEC_ATMEL;
14196 tg3_flag_set(tp, NVRAM_BUFFERED);
14197 tg3_flag_set(tp, FLASH);
14199 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14200 case FLASH_5717VENDOR_ATMEL_MDB021D:
14201 /* Detect size with tg3_nvram_get_size() */
14203 case FLASH_5717VENDOR_ATMEL_ADB021B:
14204 case FLASH_5717VENDOR_ATMEL_ADB021D:
14205 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14208 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14212 case FLASH_5717VENDOR_ST_M_M25PE10:
14213 case FLASH_5717VENDOR_ST_A_M25PE10:
14214 case FLASH_5717VENDOR_ST_M_M45PE10:
14215 case FLASH_5717VENDOR_ST_A_M45PE10:
14216 case FLASH_5717VENDOR_ST_M_M25PE20:
14217 case FLASH_5717VENDOR_ST_A_M25PE20:
14218 case FLASH_5717VENDOR_ST_M_M45PE20:
14219 case FLASH_5717VENDOR_ST_A_M45PE20:
14220 case FLASH_5717VENDOR_ST_25USPT:
14221 case FLASH_5717VENDOR_ST_45USPT:
14222 tp->nvram_jedecnum = JEDEC_ST;
14223 tg3_flag_set(tp, NVRAM_BUFFERED);
14224 tg3_flag_set(tp, FLASH);
14226 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14227 case FLASH_5717VENDOR_ST_M_M25PE20:
14228 case FLASH_5717VENDOR_ST_M_M45PE20:
14229 /* Detect size with tg3_nvram_get_size() */
14231 case FLASH_5717VENDOR_ST_A_M25PE20:
14232 case FLASH_5717VENDOR_ST_A_M45PE20:
14233 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14236 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14241 tg3_flag_set(tp, NO_NVRAM);
14245 tg3_nvram_get_pagesize(tp, nvcfg1);
14246 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14247 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
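/* 5720/5762 NVRAM setup: 5762 pin straps are first translated to their
 * 5720 equivalents, and a missing vendor strap means no NVRAM at all.
 */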
14250 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14252 u32 nvcfg1, nvmpinstrp;
14254 nvcfg1 = tr32(NVRAM_CFG1);
14255 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14257 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14258 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14259 tg3_flag_set(tp, NO_NVRAM);
14263 switch (nvmpinstrp) {
14264 case FLASH_5762_EEPROM_HD:
14265 nvmpinstrp = FLASH_5720_EEPROM_HD;
14267 case FLASH_5762_EEPROM_LD:
14268 nvmpinstrp = FLASH_5720_EEPROM_LD;
14270 case FLASH_5720VENDOR_M_ST_M45PE20:
14271 /* This pinstrap supports multiple sizes, so force it
14272 * to read the actual size from location 0xf0.
14274 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14279 switch (nvmpinstrp) {
14280 case FLASH_5720_EEPROM_HD:
14281 case FLASH_5720_EEPROM_LD:
14282 tp->nvram_jedecnum = JEDEC_ATMEL;
14283 tg3_flag_set(tp, NVRAM_BUFFERED);
14285 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14286 tw32(NVRAM_CFG1, nvcfg1);
14287 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14288 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14290 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14292 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14293 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14294 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14295 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14296 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14297 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14298 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14299 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14300 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14301 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14302 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14303 case FLASH_5720VENDOR_ATMEL_45USPT:
14304 tp->nvram_jedecnum = JEDEC_ATMEL;
14305 tg3_flag_set(tp, NVRAM_BUFFERED);
14306 tg3_flag_set(tp, FLASH);
14308 switch (nvmpinstrp) {
14309 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14310 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14311 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14312 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14314 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14315 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14316 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14317 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14319 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14320 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14321 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14324 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14325 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14329 case FLASH_5720VENDOR_M_ST_M25PE10:
14330 case FLASH_5720VENDOR_M_ST_M45PE10:
14331 case FLASH_5720VENDOR_A_ST_M25PE10:
14332 case FLASH_5720VENDOR_A_ST_M45PE10:
14333 case FLASH_5720VENDOR_M_ST_M25PE20:
14334 case FLASH_5720VENDOR_M_ST_M45PE20:
14335 case FLASH_5720VENDOR_A_ST_M25PE20:
14336 case FLASH_5720VENDOR_A_ST_M45PE20:
14337 case FLASH_5720VENDOR_M_ST_M25PE40:
14338 case FLASH_5720VENDOR_M_ST_M45PE40:
14339 case FLASH_5720VENDOR_A_ST_M25PE40:
14340 case FLASH_5720VENDOR_A_ST_M45PE40:
14341 case FLASH_5720VENDOR_M_ST_M25PE80:
14342 case FLASH_5720VENDOR_M_ST_M45PE80:
14343 case FLASH_5720VENDOR_A_ST_M25PE80:
14344 case FLASH_5720VENDOR_A_ST_M45PE80:
14345 case FLASH_5720VENDOR_ST_25USPT:
14346 case FLASH_5720VENDOR_ST_45USPT:
14347 tp->nvram_jedecnum = JEDEC_ST;
14348 tg3_flag_set(tp, NVRAM_BUFFERED);
14349 tg3_flag_set(tp, FLASH);
14351 switch (nvmpinstrp) {
14352 case FLASH_5720VENDOR_M_ST_M25PE20:
14353 case FLASH_5720VENDOR_M_ST_M45PE20:
14354 case FLASH_5720VENDOR_A_ST_M25PE20:
14355 case FLASH_5720VENDOR_A_ST_M45PE20:
14356 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14358 case FLASH_5720VENDOR_M_ST_M25PE40:
14359 case FLASH_5720VENDOR_M_ST_M45PE40:
14360 case FLASH_5720VENDOR_A_ST_M25PE40:
14361 case FLASH_5720VENDOR_A_ST_M45PE40:
14362 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14364 case FLASH_5720VENDOR_M_ST_M25PE80:
14365 case FLASH_5720VENDOR_M_ST_M45PE80:
14366 case FLASH_5720VENDOR_A_ST_M25PE80:
14367 case FLASH_5720VENDOR_A_ST_M45PE80:
14368 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14371 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14372 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14377 tg3_flag_set(tp, NO_NVRAM);
14381 tg3_nvram_get_pagesize(tp, nvcfg1);
14382 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14383 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14385 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14388 if (tg3_nvram_read(tp, 0, &val))
14391 if (val != TG3_EEPROM_MAGIC &&
14392 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14393 tg3_flag_set(tp, NO_NVRAM);
14397 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14398 static void tg3_nvram_init(struct tg3 *tp)
14400 if (tg3_flag(tp, IS_SSB_CORE)) {
14401 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14402 tg3_flag_clear(tp, NVRAM);
14403 tg3_flag_clear(tp, NVRAM_BUFFERED);
14404 tg3_flag_set(tp, NO_NVRAM);
14408 tw32_f(GRC_EEPROM_ADDR,
14409 (EEPROM_ADDR_FSM_RESET |
14410 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14411 EEPROM_ADDR_CLKPERD_SHIFT)));
14415 /* Enable seeprom accesses. */
14416 tw32_f(GRC_LOCAL_CTRL,
14417 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14420 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14421 tg3_asic_rev(tp) != ASIC_REV_5701) {
14422 tg3_flag_set(tp, NVRAM);
14424 if (tg3_nvram_lock(tp)) {
14425 netdev_warn(tp->dev,
14426 "Cannot get nvram lock, %s failed\n",
14430 tg3_enable_nvram_access(tp);
14432 tp->nvram_size = 0;
14434 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14435 tg3_get_5752_nvram_info(tp);
14436 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14437 tg3_get_5755_nvram_info(tp);
14438 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14439 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14440 tg3_asic_rev(tp) == ASIC_REV_5785)
14441 tg3_get_5787_nvram_info(tp);
14442 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14443 tg3_get_5761_nvram_info(tp);
14444 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14445 tg3_get_5906_nvram_info(tp);
14446 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14447 tg3_flag(tp, 57765_CLASS))
14448 tg3_get_57780_nvram_info(tp);
14449 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14450 tg3_asic_rev(tp) == ASIC_REV_5719)
14451 tg3_get_5717_nvram_info(tp);
14452 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14453 tg3_asic_rev(tp) == ASIC_REV_5762)
14454 tg3_get_5720_nvram_info(tp);
14456 tg3_get_nvram_info(tp);
14458 if (tp->nvram_size == 0)
14459 tg3_get_nvram_size(tp);
14461 tg3_disable_nvram_access(tp);
14462 tg3_nvram_unlock(tp);
14465 tg3_flag_clear(tp, NVRAM);
14466 tg3_flag_clear(tp, NVRAM_BUFFERED);
14468 tg3_get_eeprom_size(tp);
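/* Table mapping PCI subsystem vendor/device IDs to the PHY fitted on
 * that board, used when neither NVRAM nor the MII ID registers yield a
 * usable PHY ID.
 */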
14472 struct subsys_tbl_ent {
14473 u16 subsys_vendor, subsys_devid;
14477 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14478 /* Broadcom boards. */
14479 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14480 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14481 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14482 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14483 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14484 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14485 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14486 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14487 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14488 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14489 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14490 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14491 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14492 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14493 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14494 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14495 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14496 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14497 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14498 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14499 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14500 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14503 { TG3PCI_SUBVENDOR_ID_3COM,
14504 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14505 { TG3PCI_SUBVENDOR_ID_3COM,
14506 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14507 { TG3PCI_SUBVENDOR_ID_3COM,
14508 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14509 { TG3PCI_SUBVENDOR_ID_3COM,
14510 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14511 { TG3PCI_SUBVENDOR_ID_3COM,
14512 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14515 { TG3PCI_SUBVENDOR_ID_DELL,
14516 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14517 { TG3PCI_SUBVENDOR_ID_DELL,
14518 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14519 { TG3PCI_SUBVENDOR_ID_DELL,
14520 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14521 { TG3PCI_SUBVENDOR_ID_DELL,
14522 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14524 /* Compaq boards. */
14525 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14526 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14527 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14528 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14529 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14530 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14531 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14532 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14533 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14534 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14537 { TG3PCI_SUBVENDOR_ID_IBM,
14538 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14541 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14545 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14546 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14547 tp->pdev->subsystem_vendor) &&
14548 (subsys_id_to_phy_id[i].subsys_devid ==
14549 tp->pdev->subsystem_device))
14550 return &subsys_id_to_phy_id[i];
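/* Read the board configuration the bootcode leaves in NIC SRAM: PHY ID,
 * LED mode, WoL/ASF/APE capabilities and assorted workaround flags.
 * Conservative defaults are used when the SRAM signature is missing.
 */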
14555 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14559 tp->phy_id = TG3_PHY_ID_INVALID;
14560 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14562 /* Assume an onboard, WOL-capable device by default. */
14563 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14564 tg3_flag_set(tp, WOL_CAP);
14566 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14567 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14568 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14569 tg3_flag_set(tp, IS_NIC);
14571 val = tr32(VCPU_CFGSHDW);
14572 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14573 tg3_flag_set(tp, ASPM_WORKAROUND);
14574 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14575 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14576 tg3_flag_set(tp, WOL_ENABLE);
14577 device_set_wakeup_enable(&tp->pdev->dev, true);
14582 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14583 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14584 u32 nic_cfg, led_cfg;
14585 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14586 int eeprom_phy_serdes = 0;
14588 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14589 tp->nic_sram_data_cfg = nic_cfg;
14591 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14592 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14593 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14594 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14595 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14596 (ver > 0) && (ver < 0x100))
14597 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14599 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14600 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14602 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14603 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14604 eeprom_phy_serdes = 1;
14606 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14607 if (nic_phy_id != 0) {
14608 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14609 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14611 eeprom_phy_id = (id1 >> 16) << 10;
14612 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14613 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14617 tp->phy_id = eeprom_phy_id;
14618 if (eeprom_phy_serdes) {
14619 if (!tg3_flag(tp, 5705_PLUS))
14620 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14622 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14625 if (tg3_flag(tp, 5750_PLUS))
14626 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14627 SHASTA_EXT_LED_MODE_MASK);
14629 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14633 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14634 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14637 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14638 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14641 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14642 tp->led_ctrl = LED_CTRL_MODE_MAC;
14644 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14645 * read on some older 5700/5701 bootcode.
14647 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14648 tg3_asic_rev(tp) == ASIC_REV_5701)
14649 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14653 case SHASTA_EXT_LED_SHARED:
14654 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14655 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14656 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14657 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14658 LED_CTRL_MODE_PHY_2);
14661 case SHASTA_EXT_LED_MAC:
14662 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14665 case SHASTA_EXT_LED_COMBO:
14666 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14667 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14668 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14669 LED_CTRL_MODE_PHY_2);
14674 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14675 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14676 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14677 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14679 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14680 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14682 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14683 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14684 if ((tp->pdev->subsystem_vendor ==
14685 PCI_VENDOR_ID_ARIMA) &&
14686 (tp->pdev->subsystem_device == 0x205a ||
14687 tp->pdev->subsystem_device == 0x2063))
14688 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14690 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14691 tg3_flag_set(tp, IS_NIC);
14694 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14695 tg3_flag_set(tp, ENABLE_ASF);
14696 if (tg3_flag(tp, 5750_PLUS))
14697 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14700 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14701 tg3_flag(tp, 5750_PLUS))
14702 tg3_flag_set(tp, ENABLE_APE);
14704 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14705 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14706 tg3_flag_clear(tp, WOL_CAP);
14708 if (tg3_flag(tp, WOL_CAP) &&
14709 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14710 tg3_flag_set(tp, WOL_ENABLE);
14711 device_set_wakeup_enable(&tp->pdev->dev, true);
14714 if (cfg2 & (1 << 17))
14715 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14717 /* Serdes signal pre-emphasis in register 0x590 is set by the
14718 * bootcode if bit 18 is set. */
14719 if (cfg2 & (1 << 18))
14720 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14722 if ((tg3_flag(tp, 57765_PLUS) ||
14723 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14724 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14725 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14726 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14728 if (tg3_flag(tp, PCI_EXPRESS)) {
14731 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14732 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14733 !tg3_flag(tp, 57765_PLUS) &&
14734 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14735 tg3_flag_set(tp, ASPM_WORKAROUND);
14736 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14737 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14738 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14739 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14742 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14743 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14744 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14745 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14746 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14747 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14750 if (tg3_flag(tp, WOL_CAP))
14751 device_set_wakeup_enable(&tp->pdev->dev,
14752 tg3_flag(tp, WOL_ENABLE));
14754 device_set_wakeup_capable(&tp->pdev->dev, false);
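/* Read one 32-bit word from the OTP area via the APE: issue a read
 * command and poll the status register for completion while holding
 * the NVRAM lock.
 */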
14757 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14760 u32 val2, off = offset * 8;
14762 err = tg3_nvram_lock(tp);
14766 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14767 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14768 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14769 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14772 for (i = 0; i < 100; i++) {
14773 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14774 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14775 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14781 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14783 tg3_nvram_unlock(tp);
14784 if (val2 & APE_OTP_STATUS_CMD_DONE)
14790 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14795 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14796 tw32(OTP_CTRL, cmd);
14798 /* Wait for up to 1 ms for command to execute. */
14799 for (i = 0; i < 100; i++) {
14800 val = tr32(OTP_STATUS);
14801 if (val & OTP_STATUS_CMD_DONE)
14806 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14809 /* Read the gphy configuration from the OTP region of the chip. The gphy
14810 * configuration is a 32-bit value that straddles the alignment boundary.
14811 * We do two 32-bit reads and then shift and merge the results.
14813 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14815 u32 bhalf_otp, thalf_otp;
14817 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14819 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14822 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14824 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14827 thalf_otp = tr32(OTP_READ_DATA);
14829 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14831 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14834 bhalf_otp = tr32(OTP_READ_DATA);
14836 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
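/* Advertise every speed/duplex mode the attached PHY type supports and
 * default to autonegotiation; the actual link state starts out unknown.
 */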
14839 static void tg3_phy_init_link_config(struct tg3 *tp)
14841 u32 adv = ADVERTISED_Autoneg;
14843 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14844 adv |= ADVERTISED_1000baseT_Half |
14845 ADVERTISED_1000baseT_Full;
14847 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14848 adv |= ADVERTISED_100baseT_Half |
14849 ADVERTISED_100baseT_Full |
14850 ADVERTISED_10baseT_Half |
14851 ADVERTISED_10baseT_Full |
14854 adv |= ADVERTISED_FIBRE;
14856 tp->link_config.advertising = adv;
14857 tp->link_config.speed = SPEED_UNKNOWN;
14858 tp->link_config.duplex = DUPLEX_UNKNOWN;
14859 tp->link_config.autoneg = AUTONEG_ENABLE;
14860 tp->link_config.active_speed = SPEED_UNKNOWN;
14861 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
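/* Identify the attached PHY: use the MII ID registers unless ASF/APE
 * firmware owns the PHY, then fall back to the subsystem-ID table or to
 * the value found in the EEPROM.
 */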
14866 static int tg3_phy_probe(struct tg3 *tp)
14868 u32 hw_phy_id_1, hw_phy_id_2;
14869 u32 hw_phy_id, hw_phy_id_masked;
14872 /* flow control autonegotiation is default behavior */
14873 tg3_flag_set(tp, PAUSE_AUTONEG);
14874 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14876 if (tg3_flag(tp, ENABLE_APE)) {
14877 switch (tp->pci_fn) {
14879 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14882 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14885 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14888 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14893 if (!tg3_flag(tp, ENABLE_ASF) &&
14894 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14895 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14896 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14897 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14899 if (tg3_flag(tp, USE_PHYLIB))
14900 return tg3_phy_init(tp);
14902 /* Reading the PHY ID register can conflict with ASF
14903 * firmware access to the PHY hardware.
14906 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14907 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14909 /* Now read the physical PHY_ID from the chip and verify
14910 * that it is sane. If it doesn't look good, we fall back to
14911 * the hard-coded table based PHY_ID or, failing that, the
14912 * value found in the eeprom area.
14914 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14915 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14917 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14918 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14919 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14921 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14924 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14925 tp->phy_id = hw_phy_id;
14926 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14927 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14929 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14931 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14932 /* Do nothing, phy ID already set up in
14933 * tg3_get_eeprom_hw_cfg().
14936 struct subsys_tbl_ent *p;
14938 /* No eeprom signature? Try the hardcoded
14939 * subsys device table.
14941 p = tg3_lookup_by_subsys(tp);
14943 tp->phy_id = p->phy_id;
14944 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14945 /* So far we have seen the IDs 0xbc050cd0,
14946 * 0xbc050f80 and 0xbc050c30 on devices
14947 * connected to a BCM4785, and there are
14948 * probably more. Just assume that the phy is
14949 * supported when it is connected to an SSB core
14956 tp->phy_id == TG3_PHY_ID_BCM8002)
14957 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14961 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14962 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14963 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14964 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14965 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14966 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14967 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14968 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14969 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14970 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14972 tg3_phy_init_link_config(tp);
14974 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14975 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14976 !tg3_flag(tp, ENABLE_APE) &&
14977 !tg3_flag(tp, ENABLE_ASF)) {
14980 tg3_readphy(tp, MII_BMSR, &bmsr);
14981 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14982 (bmsr & BMSR_LSTATUS))
14983 goto skip_phy_reset;
14985 err = tg3_phy_reset(tp);
14989 tg3_phy_set_wirespeed(tp);
14991 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14992 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14993 tp->link_config.flowctrl);
14995 tg3_writephy(tp, MII_BMCR,
14996 BMCR_ANENABLE | BMCR_ANRESTART);
15001 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15002 err = tg3_init_5401phy_dsp(tp);
15006 err = tg3_init_5401phy_dsp(tp);
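/* Extract the board part number (and, on boards whose VPD manufacturer
 * ID is "1028", a firmware string) from the PCI VPD read-only section;
 * fall back to hard-coded names keyed off the PCI device ID.
 */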
15012 static void tg3_read_vpd(struct tg3 *tp)
15015 unsigned int block_end, rosize, len;
15019 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15023 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15025 goto out_not_found;
15027 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15028 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15029 i += PCI_VPD_LRDT_TAG_SIZE;
15031 if (block_end > vpdlen)
15032 goto out_not_found;
15034 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15035 PCI_VPD_RO_KEYWORD_MFR_ID);
15037 len = pci_vpd_info_field_size(&vpd_data[j]);
15039 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15040 if (j + len > block_end || len != 4 ||
15041 memcmp(&vpd_data[j], "1028", 4))
15044 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15045 PCI_VPD_RO_KEYWORD_VENDOR0);
15049 len = pci_vpd_info_field_size(&vpd_data[j]);
15051 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15052 if (j + len > block_end)
15055 if (len >= sizeof(tp->fw_ver))
15056 len = sizeof(tp->fw_ver) - 1;
15057 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15058 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15063 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15064 PCI_VPD_RO_KEYWORD_PARTNO);
15066 goto out_not_found;
15068 len = pci_vpd_info_field_size(&vpd_data[i]);
15070 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15071 if (len > TG3_BPN_SIZE ||
15072 (len + i) > vpdlen)
15073 goto out_not_found;
15075 memcpy(tp->board_part_number, &vpd_data[i], len);
15079 if (tp->board_part_number[0])
15083 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15084 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15086 strcpy(tp->board_part_number, "BCM5717");
15087 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15088 strcpy(tp->board_part_number, "BCM5718");
15091 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15092 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15093 strcpy(tp->board_part_number, "BCM57780");
15094 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15095 strcpy(tp->board_part_number, "BCM57760");
15096 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15097 strcpy(tp->board_part_number, "BCM57790");
15098 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15099 strcpy(tp->board_part_number, "BCM57788");
15102 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15103 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15104 strcpy(tp->board_part_number, "BCM57761");
15105 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15106 strcpy(tp->board_part_number, "BCM57765");
15107 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15108 strcpy(tp->board_part_number, "BCM57781");
15109 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15110 strcpy(tp->board_part_number, "BCM57785");
15111 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15112 strcpy(tp->board_part_number, "BCM57791");
15113 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15114 strcpy(tp->board_part_number, "BCM57795");
15117 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15118 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15119 strcpy(tp->board_part_number, "BCM57762");
15120 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15121 strcpy(tp->board_part_number, "BCM57766");
15122 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15123 strcpy(tp->board_part_number, "BCM57782");
15124 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15125 strcpy(tp->board_part_number, "BCM57786");
15128 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15129 strcpy(tp->board_part_number, "BCM95906");
15132 strcpy(tp->board_part_number, "none");
15136 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15140 if (tg3_nvram_read(tp, offset, &val) ||
15141 (val & 0xfc000000) != 0x0c000000 ||
15142 tg3_nvram_read(tp, offset + 4, &val) ||
15149 static void tg3_read_bc_ver(struct tg3 *tp)
15151 u32 val, offset, start, ver_offset;
15153 bool newver = false;
15155 if (tg3_nvram_read(tp, 0xc, &offset) ||
15156 tg3_nvram_read(tp, 0x4, &start))
15159 offset = tg3_nvram_logical_addr(tp, offset);
15161 if (tg3_nvram_read(tp, offset, &val))
15164 if ((val & 0xfc000000) == 0x0c000000) {
15165 if (tg3_nvram_read(tp, offset + 4, &val))
15172 dst_off = strlen(tp->fw_ver);
15175 if (TG3_VER_SIZE - dst_off < 16 ||
15176 tg3_nvram_read(tp, offset + 8, &ver_offset))
15179 offset = offset + ver_offset - start;
15180 for (i = 0; i < 16; i += 4) {
15182 if (tg3_nvram_read_be32(tp, offset + i, &v))
15185 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15190 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15193 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15194 TG3_NVM_BCVER_MAJSFT;
15195 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15196 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15197 "v%d.%02d", major, minor);
15201 static void tg3_read_hwsb_ver(struct tg3 *tp)
15203 u32 val, major, minor;
15205 /* Use native endian representation */
15206 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15209 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15210 TG3_NVM_HWSB_CFG1_MAJSFT;
15211 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15212 TG3_NVM_HWSB_CFG1_MINSFT;
15214 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15217 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15219 u32 offset, major, minor, build;
15221 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15223 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15226 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15227 case TG3_EEPROM_SB_REVISION_0:
15228 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15230 case TG3_EEPROM_SB_REVISION_2:
15231 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15233 case TG3_EEPROM_SB_REVISION_3:
15234 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15236 case TG3_EEPROM_SB_REVISION_4:
15237 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15239 case TG3_EEPROM_SB_REVISION_5:
15240 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15242 case TG3_EEPROM_SB_REVISION_6:
15243 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15249 if (tg3_nvram_read(tp, offset, &val))
15252 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15253 TG3_EEPROM_SB_EDH_BLD_SHFT;
15254 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15255 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15256 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15258 if (minor > 99 || build > 26)
15261 offset = strlen(tp->fw_ver);
15262 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15263 " v%d.%02d", major, minor);
15266 offset = strlen(tp->fw_ver);
15267 if (offset < TG3_VER_SIZE - 1)
15268 tp->fw_ver[offset] = 'a' + build - 1;
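/* Locate the ASF management firmware image through the NVRAM directory
 * and append its version string to tp->fw_ver.
 */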
15272 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15274 u32 val, offset, start;
15277 for (offset = TG3_NVM_DIR_START;
15278 offset < TG3_NVM_DIR_END;
15279 offset += TG3_NVM_DIRENT_SIZE) {
15280 if (tg3_nvram_read(tp, offset, &val))
15283 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15287 if (offset == TG3_NVM_DIR_END)
15290 if (!tg3_flag(tp, 5705_PLUS))
15291 start = 0x08000000;
15292 else if (tg3_nvram_read(tp, offset - 4, &start))
15295 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15296 !tg3_fw_img_is_valid(tp, offset) ||
15297 tg3_nvram_read(tp, offset + 8, &val))
15300 offset += val - start;
15302 vlen = strlen(tp->fw_ver);
15304 tp->fw_ver[vlen++] = ',';
15305 tp->fw_ver[vlen++] = ' ';
15307 for (i = 0; i < 4; i++) {
15309 if (tg3_nvram_read_be32(tp, offset, &v))
15312 offset += sizeof(v);
15314 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15315 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15319 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15324 static void tg3_probe_ncsi(struct tg3 *tp)
15328 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15329 if (apedata != APE_SEG_SIG_MAGIC)
15332 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15333 if (!(apedata & APE_FW_STATUS_READY))
15336 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15337 tg3_flag_set(tp, APE_HAS_NCSI);
15340 static void tg3_read_dash_ver(struct tg3 *tp)
15346 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15348 if (tg3_flag(tp, APE_HAS_NCSI))
15350 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15355 vlen = strlen(tp->fw_ver);
15357 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15359 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15360 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15361 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15362 (apedata & APE_FW_VERSION_BLDMSK));
15365 static void tg3_read_otp_ver(struct tg3 *tp)
15369 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15372 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15373 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15374 TG3_OTP_MAGIC0_VALID(val)) {
15375 u64 val64 = (u64) val << 32 | val2;
15379 for (i = 0; i < 7; i++) {
15380 if ((val64 & 0xff) == 0)
15382 ver = val64 & 0xff;
15385 vlen = strlen(tp->fw_ver);
15386 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
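/* Build tp->fw_ver: pick the bootcode, self-boot or hardware self-boot
 * parser based on the NVRAM magic value, then append DASH/NCSI or
 * management firmware versions when present.
 */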
15390 static void tg3_read_fw_ver(struct tg3 *tp)
15393 bool vpd_vers = false;
15395 if (tp->fw_ver[0] != 0)
15398 if (tg3_flag(tp, NO_NVRAM)) {
15399 strcat(tp->fw_ver, "sb");
15400 tg3_read_otp_ver(tp);
15404 if (tg3_nvram_read(tp, 0, &val))
15407 if (val == TG3_EEPROM_MAGIC)
15408 tg3_read_bc_ver(tp);
15409 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15410 tg3_read_sb_ver(tp, val);
15411 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15412 tg3_read_hwsb_ver(tp);
15414 if (tg3_flag(tp, ENABLE_ASF)) {
15415 if (tg3_flag(tp, ENABLE_APE)) {
15416 tg3_probe_ncsi(tp);
15418 tg3_read_dash_ver(tp);
15419 } else if (!vpd_vers) {
15420 tg3_read_mgmtfw_ver(tp);
15424 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15427 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15429 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15430 return TG3_RX_RET_MAX_SIZE_5717;
15431 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15432 return TG3_RX_RET_MAX_SIZE_5700;
15434 return TG3_RX_RET_MAX_SIZE_5705;
15437 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15438 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15439 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15440 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15444 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15446 struct pci_dev *peer;
15447 unsigned int func, devnr = tp->pdev->devfn & ~7;
15449 for (func = 0; func < 8; func++) {
15450 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15451 if (peer && peer != tp->pdev)
15455 /* 5704 can be configured in single-port mode, set peer to
15456 * tp->pdev in that case.
15464 * We don't need to keep the refcount elevated; there's no way
15465 * to remove one half of this device without removing the other
15472 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15474 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15475 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15478 /* All devices that use the alternate
15479 * ASIC REV location have a CPMU.
15481 tg3_flag_set(tp, CPMU_PRESENT);
15483 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15484 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15485 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15486 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15487 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15488 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15489 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15490 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15491 reg = TG3PCI_GEN2_PRODID_ASICREV;
15492 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15493 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15494 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15495 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15496 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15497 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15498 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15499 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15500 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15502 reg = TG3PCI_GEN15_PRODID_ASICREV;
15504 reg = TG3PCI_PRODID_ASICREV;
15506 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15509 /* Wrong chip ID in 5752 A0. This code can be removed later
15510 * as A0 is not in production.
15512 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15513 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15515 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15516 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15518 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15519 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15520 tg3_asic_rev(tp) == ASIC_REV_5720)
15521 tg3_flag_set(tp, 5717_PLUS);
15523 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15524 tg3_asic_rev(tp) == ASIC_REV_57766)
15525 tg3_flag_set(tp, 57765_CLASS);
15527 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15528 tg3_asic_rev(tp) == ASIC_REV_5762)
15529 tg3_flag_set(tp, 57765_PLUS);
15531 /* Intentionally exclude ASIC_REV_5906 */
15532 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15533 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15534 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15535 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15536 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15537 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15538 tg3_flag(tp, 57765_PLUS))
15539 tg3_flag_set(tp, 5755_PLUS);
15541 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15542 tg3_asic_rev(tp) == ASIC_REV_5714)
15543 tg3_flag_set(tp, 5780_CLASS);
15545 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15546 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15547 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15548 tg3_flag(tp, 5755_PLUS) ||
15549 tg3_flag(tp, 5780_CLASS))
15550 tg3_flag_set(tp, 5750_PLUS);
15552 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15553 tg3_flag(tp, 5750_PLUS))
15554 tg3_flag_set(tp, 5705_PLUS);
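/* A device is limited to 10/100 operation if the board ID straps, a FET
 * PHY, or the PCI device table entry says so.
 */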
15557 static bool tg3_10_100_only_device(struct tg3 *tp,
15558 const struct pci_device_id *ent)
15560 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15562 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15563 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15564 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15567 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15568 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15569 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15579 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15582 u32 pci_state_reg, grc_misc_cfg;
15587 /* Force memory write invalidate off. If we leave it on,
15588 * then on 5700_BX chips we have to enable a workaround.
15589 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15590 * to match the cacheline size. The Broadcom driver has this
15591 * workaround but turns MWI off all the time, so it never uses
15592 * it. This suggests that the workaround is insufficient.
15594 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15595 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15596 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15598 /* Important! -- Make sure register accesses are byteswapped
15599 * correctly. Also, for those chips that require it, make
15600 * sure that indirect register accesses are enabled before
15601 * the first operation.
15603 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15605 tp->misc_host_ctrl |= (misc_ctrl_reg &
15606 MISC_HOST_CTRL_CHIPREV);
15607 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15608 tp->misc_host_ctrl);
15610 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15612 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15613 * we need to disable memory and use config. cycles
15614 * only to access all registers. The 5702/03 chips
15615 * can mistakenly decode the special cycles from the
15616 * ICH chipsets as memory write cycles, causing corruption
15617 * of register and memory space. Only certain ICH bridges
15618 * will drive special cycles with non-zero data during the
15619 * address phase which can fall within the 5703's address
15620 * range. This is not an ICH bug as the PCI spec allows
15621 * non-zero address during special cycles. However, only
15622 * these ICH bridges are known to drive non-zero addresses
15623 * during special cycles.
15625 * Since special cycles do not cross PCI bridges, we only
15626 * enable this workaround if the 5703 is on the secondary
15627 * bus of these ICH bridges.
15629 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15630 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15631 static struct tg3_dev_id {
15635 } ich_chipsets[] = {
15636 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15638 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15640 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15642 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15646 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15647 struct pci_dev *bridge = NULL;
15649 while (pci_id->vendor != 0) {
15650 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15656 if (pci_id->rev != PCI_ANY_ID) {
15657 if (bridge->revision > pci_id->rev)
15660 if (bridge->subordinate &&
15661 (bridge->subordinate->number ==
15662 tp->pdev->bus->number)) {
15663 tg3_flag_set(tp, ICH_WORKAROUND);
15664 pci_dev_put(bridge);
15670 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15671 static struct tg3_dev_id {
15674 } bridge_chipsets[] = {
15675 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15676 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15679 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15680 struct pci_dev *bridge = NULL;
15682 while (pci_id->vendor != 0) {
15683 bridge = pci_get_device(pci_id->vendor,
15690 if (bridge->subordinate &&
15691 (bridge->subordinate->number <=
15692 tp->pdev->bus->number) &&
15693 (bridge->subordinate->busn_res.end >=
15694 tp->pdev->bus->number)) {
15695 tg3_flag_set(tp, 5701_DMA_BUG);
15696 pci_dev_put(bridge);
15702 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15703 * DMA addresses > 40-bit. This bridge may have additional
15704 * 57xx devices behind it in some 4-port NIC designs, for example.
15705 * Any tg3 device behind the bridge will also need the 40-bit DMA workaround.
15708 if (tg3_flag(tp, 5780_CLASS)) {
15709 tg3_flag_set(tp, 40BIT_DMA_BUG);
15710 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15712 struct pci_dev *bridge = NULL;
15715 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15716 PCI_DEVICE_ID_SERVERWORKS_EPB,
15718 if (bridge && bridge->subordinate &&
15719 (bridge->subordinate->number <=
15720 tp->pdev->bus->number) &&
15721 (bridge->subordinate->busn_res.end >=
15722 tp->pdev->bus->number)) {
15723 tg3_flag_set(tp, 40BIT_DMA_BUG);
15724 pci_dev_put(bridge);
15730 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15731 tg3_asic_rev(tp) == ASIC_REV_5714)
15732 tp->pdev_peer = tg3_find_peer(tp);
15734 /* Determine TSO capabilities */
15735 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15736 ; /* Do nothing. HW bug. */
15737 else if (tg3_flag(tp, 57765_PLUS))
15738 tg3_flag_set(tp, HW_TSO_3);
15739 else if (tg3_flag(tp, 5755_PLUS) ||
15740 tg3_asic_rev(tp) == ASIC_REV_5906)
15741 tg3_flag_set(tp, HW_TSO_2);
15742 else if (tg3_flag(tp, 5750_PLUS)) {
15743 tg3_flag_set(tp, HW_TSO_1);
15744 tg3_flag_set(tp, TSO_BUG);
15745 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15746 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15747 tg3_flag_clear(tp, TSO_BUG);
15748 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15749 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15750 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15751 tg3_flag_set(tp, FW_TSO);
15752 tg3_flag_set(tp, TSO_BUG);
15753 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15754 tp->fw_needed = FIRMWARE_TG3TSO5;
15756 tp->fw_needed = FIRMWARE_TG3TSO;
15759 /* Selectively allow TSO based on operating conditions */
15760 if (tg3_flag(tp, HW_TSO_1) ||
15761 tg3_flag(tp, HW_TSO_2) ||
15762 tg3_flag(tp, HW_TSO_3) ||
15763 tg3_flag(tp, FW_TSO)) {
15764 /* For firmware TSO, assume ASF is disabled.
15765 * We'll disable TSO later if we discover ASF
15766 * is enabled in tg3_get_eeprom_hw_cfg().
15768 tg3_flag_set(tp, TSO_CAPABLE);
15770 tg3_flag_clear(tp, TSO_CAPABLE);
15771 tg3_flag_clear(tp, TSO_BUG);
15772 tp->fw_needed = NULL;
15775 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15776 tp->fw_needed = FIRMWARE_TG3;
15778 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15779 tp->fw_needed = FIRMWARE_TG357766;
15783 if (tg3_flag(tp, 5750_PLUS)) {
15784 tg3_flag_set(tp, SUPPORT_MSI);
15785 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15786 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15787 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15788 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15789 tp->pdev_peer == tp->pdev))
15790 tg3_flag_clear(tp, SUPPORT_MSI);
15792 if (tg3_flag(tp, 5755_PLUS) ||
15793 tg3_asic_rev(tp) == ASIC_REV_5906) {
15794 tg3_flag_set(tp, 1SHOT_MSI);
15797 if (tg3_flag(tp, 57765_PLUS)) {
15798 tg3_flag_set(tp, SUPPORT_MSIX);
15799 tp->irq_max = TG3_IRQ_MAX_VECS;
15805 if (tp->irq_max > 1) {
15806 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15807 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15809 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15810 tg3_asic_rev(tp) == ASIC_REV_5720)
15811 tp->txq_max = tp->irq_max - 1;
15814 if (tg3_flag(tp, 5755_PLUS) ||
15815 tg3_asic_rev(tp) == ASIC_REV_5906)
15816 tg3_flag_set(tp, SHORT_DMA_BUG);
15818 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15819 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15821 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15822 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15823 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15824 tg3_asic_rev(tp) == ASIC_REV_5762)
15825 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15827 if (tg3_flag(tp, 57765_PLUS) &&
15828 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15829 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15831 if (!tg3_flag(tp, 5705_PLUS) ||
15832 tg3_flag(tp, 5780_CLASS) ||
15833 tg3_flag(tp, USE_JUMBO_BDFLAG))
15834 tg3_flag_set(tp, JUMBO_CAPABLE);
15836 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15839 if (pci_is_pcie(tp->pdev)) {
15842 tg3_flag_set(tp, PCI_EXPRESS);
15844 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15845 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15846 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15847 tg3_flag_clear(tp, HW_TSO_2);
15848 tg3_flag_clear(tp, TSO_CAPABLE);
15850 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15851 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15852 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15853 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15854 tg3_flag_set(tp, CLKREQ_BUG);
15855 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15856 tg3_flag_set(tp, L1PLLPD_EN);
15858 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15859 /* BCM5785 devices are effectively PCIe devices, and should
15860 * follow PCIe codepaths, but do not have a PCIe capabilities section. */
15863 tg3_flag_set(tp, PCI_EXPRESS);
15864 } else if (!tg3_flag(tp, 5705_PLUS) ||
15865 tg3_flag(tp, 5780_CLASS)) {
15866 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15867 if (!tp->pcix_cap) {
15868 dev_err(&tp->pdev->dev,
15869 "Cannot find PCI-X capability, aborting\n");
15873 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15874 tg3_flag_set(tp, PCIX_MODE);
15877 /* If we have an AMD 762 or VIA K8T800 chipset, write
15878 * reordering to the mailbox registers done by the host
15879 * controller can cause major trouble. We read back from
15880 * every mailbox register write to force the writes to be
15881 * posted to the chip in order.
15883 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15884 !tg3_flag(tp, PCI_EXPRESS))
15885 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15887 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15888 &tp->pci_cacheline_sz);
15889 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15890 &tp->pci_lat_timer);
15891 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15892 tp->pci_lat_timer < 64) {
15893 tp->pci_lat_timer = 64;
15894 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15895 tp->pci_lat_timer);
15898 /* Important! -- It is critical that the PCI-X hw workaround
15899 * situation is decided before the first MMIO register access.
15901 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15902 /* 5700 BX chips need to have their TX producer index
15903 * mailboxes written twice to work around a bug.
15905 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15907 /* If we are in PCI-X mode, enable register write workaround.
15909 * The workaround is to use indirect register accesses
15910 * for all chip writes not to mailbox registers.
15912 if (tg3_flag(tp, PCIX_MODE)) {
15915 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15917 /* The chip can have its power management PCI config
15918 * space registers clobbered due to this bug.
15919 * So explicitly force the chip into D0 here.
15921 pci_read_config_dword(tp->pdev,
15922 tp->pm_cap + PCI_PM_CTRL,
15924 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15925 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15926 pci_write_config_dword(tp->pdev,
15927 tp->pm_cap + PCI_PM_CTRL,
15930 /* Also, force SERR#/PERR# in PCI command. */
15931 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15932 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15933 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15937 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15938 tg3_flag_set(tp, PCI_HIGH_SPEED);
15939 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15940 tg3_flag_set(tp, PCI_32BIT);
15942 /* Chip-specific fixup from Broadcom driver */
15943 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15944 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15945 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15946 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15949 /* Default fast path register access methods */
15950 tp->read32 = tg3_read32;
15951 tp->write32 = tg3_write32;
15952 tp->read32_mbox = tg3_read32;
15953 tp->write32_mbox = tg3_write32;
15954 tp->write32_tx_mbox = tg3_write32;
15955 tp->write32_rx_mbox = tg3_write32;
15957 /* Various workaround register access methods */
15958 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15959 tp->write32 = tg3_write_indirect_reg32;
15960 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15961 (tg3_flag(tp, PCI_EXPRESS) &&
15962 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15964 * Back-to-back register writes can cause problems on these
15965 * chips; the workaround is to read back all register writes
15966 * except those to the mailbox registers.
15968 * See tg3_write_indirect_reg32().
15970 tp->write32 = tg3_write_flush_reg32;
15973 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15974 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15975 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15976 tp->write32_rx_mbox = tg3_write_flush_reg32;
15979 if (tg3_flag(tp, ICH_WORKAROUND)) {
15980 tp->read32 = tg3_read_indirect_reg32;
15981 tp->write32 = tg3_write_indirect_reg32;
15982 tp->read32_mbox = tg3_read_indirect_mbox;
15983 tp->write32_mbox = tg3_write_indirect_mbox;
15984 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15985 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15990 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15991 pci_cmd &= ~PCI_COMMAND_MEMORY;
15992 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15994 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15995 tp->read32_mbox = tg3_read32_mbox_5906;
15996 tp->write32_mbox = tg3_write32_mbox_5906;
15997 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15998 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16001 if (tp->write32 == tg3_write_indirect_reg32 ||
16002 (tg3_flag(tp, PCIX_MODE) &&
16003 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16004 tg3_asic_rev(tp) == ASIC_REV_5701)))
16005 tg3_flag_set(tp, SRAM_USE_CONFIG);
16007 /* The memory arbiter has to be enabled in order for SRAM accesses
16008 * to succeed. Normally on powerup the tg3 chip firmware will make
16009 * sure it is enabled, but other entities such as system netboot
16010 * code might disable it.
16012 val = tr32(MEMARB_MODE);
16013 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16015 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16016 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16017 tg3_flag(tp, 5780_CLASS)) {
16018 if (tg3_flag(tp, PCIX_MODE)) {
16019 pci_read_config_dword(tp->pdev,
16020 tp->pcix_cap + PCI_X_STATUS,
16022 tp->pci_fn = val & 0x7;
16024 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16025 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16026 tg3_asic_rev(tp) == ASIC_REV_5720) {
16027 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16028 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16029 val = tr32(TG3_CPMU_STATUS);
16031 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16032 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16034 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16035 TG3_CPMU_STATUS_FSHFT_5719;
16038 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16039 tp->write32_tx_mbox = tg3_write_flush_reg32;
16040 tp->write32_rx_mbox = tg3_write_flush_reg32;
16043 /* Get eeprom hw config before calling tg3_set_power_state().
16044 * In particular, the TG3_FLAG_IS_NIC flag must be
16045 * determined before calling tg3_set_power_state() so that
16046 * we know whether or not to switch out of Vaux power.
16047 * When the flag is set, it means that GPIO1 is used for eeprom
16048 * write protect and also implies that it is a LOM where GPIOs
16049 * are not used to switch power.
16051 tg3_get_eeprom_hw_cfg(tp);
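/* Firmware-based TSO is dropped when ASF management firmware is
 * enabled; the two presumably cannot coexist on the device's CPU.
 */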
16053 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16054 tg3_flag_clear(tp, TSO_CAPABLE);
16055 tg3_flag_clear(tp, TSO_BUG);
16056 tp->fw_needed = NULL;
16059 if (tg3_flag(tp, ENABLE_APE)) {
16060 /* Allow reads and writes to the
16061 * APE register and memory space.
16063 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16064 PCISTATE_ALLOW_APE_SHMEM_WR |
16065 PCISTATE_ALLOW_APE_PSPACE_WR;
16066 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16069 tg3_ape_lock_init(tp);
16072 /* Set up tp->grc_local_ctrl before calling
16073 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16074 * will bring 5700's external PHY out of reset.
16075 * It is also used as eeprom write protect on LOMs.
16077 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16078 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16079 tg3_flag(tp, EEPROM_WRITE_PROT))
16080 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16081 GRC_LCLCTRL_GPIO_OUTPUT1);
16082 /* Unused GPIO3 must be driven as output on 5752 because there
16083 * are no pull-up resistors on unused GPIO pins.
16085 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16086 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16088 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16089 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090 tg3_flag(tp, 57765_CLASS))
16091 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16093 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16095 /* Turn off the debug UART. */
16096 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16097 if (tg3_flag(tp, IS_NIC))
16098 /* Keep VMain power. */
16099 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16100 GRC_LCLCTRL_GPIO_OUTPUT0;
16103 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16104 tp->grc_local_ctrl |=
16105 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16107 /* Switch out of Vaux if it is a NIC */
16108 tg3_pwrsrc_switch_to_vmain(tp);
16110 /* Derive initial jumbo mode from MTU assigned in
16111 * ether_setup() via the alloc_etherdev() call
16113 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16114 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16116 /* Determine WakeOnLan speed to use. */
16117 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16118 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16119 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16120 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16121 tg3_flag_clear(tp, WOL_SPEED_100MB);
16123 tg3_flag_set(tp, WOL_SPEED_100MB);
16126 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16127 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16129 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16130 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16131 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16132 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16133 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16134 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16135 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16136 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16138 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16139 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16140 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16141 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16142 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
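/* 5705-and-newer copper PHYs (excluding FET PHYs and the 5785/57780/
 * 57765+ families) need one of the PHY workarounds below: jitter/trim
 * adjustments on 5755/5787/5784/5761 parts, a BER workaround on the rest.
 */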
16144 if (tg3_flag(tp, 5705_PLUS) &&
16145 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16146 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16147 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16148 !tg3_flag(tp, 57765_PLUS)) {
16149 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16150 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16151 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16152 tg3_asic_rev(tp) == ASIC_REV_5761) {
16153 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16154 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16155 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16156 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16157 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16159 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
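/* Non-AX 5784 chips keep PHY configuration values in OTP; fall back to
 * the defaults if the OTP block reads back as zero.
 */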
16162 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16163 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16164 tp->phy_otp = tg3_read_otp_phycfg(tp);
16165 if (tp->phy_otp == 0)
16166 tp->phy_otp = TG3_OTP_DEFAULT;
16169 if (tg3_flag(tp, CPMU_PRESENT))
16170 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16172 tp->mi_mode = MAC_MI_MODE_BASE;
16174 tp->coalesce_mode = 0;
16175 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16176 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16177 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16179 /* Set these bits to enable statistics workaround. */
16180 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16181 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16182 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16183 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16184 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16187 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16188 tg3_asic_rev(tp) == ASIC_REV_57780)
16189 tg3_flag_set(tp, USE_PHYLIB);
16191 err = tg3_mdio_init(tp);
16195 /* Initialize data/descriptor byte/word swapping. */
16196 val = tr32(GRC_MODE);
16197 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16198 tg3_asic_rev(tp) == ASIC_REV_5762)
16199 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16200 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16201 GRC_MODE_B2HRX_ENABLE |
16202 GRC_MODE_HTX2B_ENABLE |
16203 GRC_MODE_HOST_STACKUP);
16205 val &= GRC_MODE_HOST_STACKUP;
16207 tw32(GRC_MODE, val | tp->grc_mode);
16209 tg3_switch_clocks(tp);
16211 /* Clear this out for sanity. */
16212 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16214 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16216 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16217 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16218 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16219 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16220 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16221 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16222 void __iomem *sram_base;
16224 /* Write some dummy words into the SRAM status block
16225 * area, see if it reads back correctly. If the return
16226 * value is bad, force enable the PCIX workaround.
16228 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16230 writel(0x00000000, sram_base);
16231 writel(0x00000000, sram_base + 4);
16232 writel(0xffffffff, sram_base + 4);
16233 if (readl(sram_base) != 0x00000000)
16234 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16239 tg3_nvram_init(tp);
16241 /* If the device has an NVRAM, no need to load patch firmware */
16242 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16243 !tg3_flag(tp, NO_NVRAM))
16244 tp->fw_needed = NULL;
16246 grc_misc_cfg = tr32(GRC_MISC_CFG);
16247 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16249 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16250 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16251 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16252 tg3_flag_set(tp, IS_5788);
16254 if (!tg3_flag(tp, IS_5788) &&
16255 tg3_asic_rev(tp) != ASIC_REV_5700)
16256 tg3_flag_set(tp, TAGGED_STATUS);
16257 if (tg3_flag(tp, TAGGED_STATUS)) {
16258 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16259 HOSTCC_MODE_CLRTICK_TXBD);
16261 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16262 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16263 tp->misc_host_ctrl);
16266 /* Preserve the APE MAC_MODE bits */
16267 if (tg3_flag(tp, ENABLE_APE))
16268 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16272 if (tg3_10_100_only_device(tp, ent))
16273 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16275 err = tg3_phy_probe(tp);
16277 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16278 /* ... but do not return immediately ... */
16283 tg3_read_fw_ver(tp);
16285 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16286 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16288 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16289 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16291 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16294 /* 5700 {AX,BX} chips have a broken status block link
16295 * change bit implementation, so we must use the
16296 * status register in those cases.
16298 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16299 tg3_flag_set(tp, USE_LINKCHG_REG);
16301 tg3_flag_clear(tp, USE_LINKCHG_REG);
16303 /* The led_ctrl is set during tg3_phy_probe; here we might
16304 * have to force the link status polling mechanism based
16305 * upon subsystem IDs.
16307 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16308 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16309 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16310 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16311 tg3_flag_set(tp, USE_LINKCHG_REG);
16314 /* For all SERDES we poll the MAC status register. */
16315 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16316 tg3_flag_set(tp, POLL_SERDES);
16318 tg3_flag_clear(tp, POLL_SERDES);
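/* The 5701 in PCI-X mode cannot DMA to 2-byte-aligned buffers, so leave
 * the receive data unaligned there and, on architectures without
 * efficient unaligned access, copy every received packet by setting
 * rx_copy_thresh to its maximum.
 */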
16320 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16321 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16322 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16323 tg3_flag(tp, PCIX_MODE)) {
16324 tp->rx_offset = NET_SKB_PAD;
16325 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16326 tp->rx_copy_thresh = ~(u16)0;
16330 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16331 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16332 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16334 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16336 /* Increment the rx prod index on the rx std ring by at most
16337 * 8 for these chips to work around hw errata.
16339 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16340 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16341 tg3_asic_rev(tp) == ASIC_REV_5755)
16342 tp->rx_std_max_post = 8;
16344 if (tg3_flag(tp, ASPM_WORKAROUND))
16345 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16346 PCIE_PWR_MGMT_L1_THRESH_MSK;
16351 #ifdef CONFIG_SPARC
16352 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16354 struct net_device *dev = tp->dev;
16355 struct pci_dev *pdev = tp->pdev;
16356 struct device_node *dp = pci_device_to_OF_node(pdev);
16357 const unsigned char *addr;
16360 addr = of_get_property(dp, "local-mac-address", &len);
16361 if (addr && len == 6) {
16362 memcpy(dev->dev_addr, addr, 6);
16368 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16370 struct net_device *dev = tp->dev;
16372 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16377 static int tg3_get_device_address(struct tg3 *tp)
16379 struct net_device *dev = tp->dev;
16380 u32 hi, lo, mac_offset;
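/* The MAC address is looked for in several places, in order: OpenFirmware
 * (SPARC), the SSB core, the SRAM MAC address mailbox, NVRAM (at a
 * port/function dependent offset), the MAC address registers themselves,
 * and finally the SPARC idprom as a last resort.
 */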
16384 #ifdef CONFIG_SPARC
16385 if (!tg3_get_macaddr_sparc(tp))
16389 if (tg3_flag(tp, IS_SSB_CORE)) {
16390 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16391 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16396 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16397 tg3_flag(tp, 5780_CLASS)) {
16398 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16400 if (tg3_nvram_lock(tp))
16401 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16403 tg3_nvram_unlock(tp);
16404 } else if (tg3_flag(tp, 5717_PLUS)) {
16405 if (tp->pci_fn & 1)
16407 if (tp->pci_fn > 1)
16408 mac_offset += 0x18c;
16409 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16412 /* First try to get it from MAC address mailbox. */
16413 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16414 if ((hi >> 16) == 0x484b) {
16415 dev->dev_addr[0] = (hi >> 8) & 0xff;
16416 dev->dev_addr[1] = (hi >> 0) & 0xff;
16418 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16419 dev->dev_addr[2] = (lo >> 24) & 0xff;
16420 dev->dev_addr[3] = (lo >> 16) & 0xff;
16421 dev->dev_addr[4] = (lo >> 8) & 0xff;
16422 dev->dev_addr[5] = (lo >> 0) & 0xff;
16424 /* Some old bootcode may report a 0 MAC address in SRAM */
16425 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16428 /* Next, try NVRAM. */
16429 if (!tg3_flag(tp, NO_NVRAM) &&
16430 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16431 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16432 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16433 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16435 /* Finally just fetch it out of the MAC control regs. */
16437 hi = tr32(MAC_ADDR_0_HIGH);
16438 lo = tr32(MAC_ADDR_0_LOW);
16440 dev->dev_addr[5] = lo & 0xff;
16441 dev->dev_addr[4] = (lo >> 8) & 0xff;
16442 dev->dev_addr[3] = (lo >> 16) & 0xff;
16443 dev->dev_addr[2] = (lo >> 24) & 0xff;
16444 dev->dev_addr[1] = hi & 0xff;
16445 dev->dev_addr[0] = (hi >> 8) & 0xff;
16449 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16450 #ifdef CONFIG_SPARC
16451 if (!tg3_get_default_macaddr_sparc(tp))
16459 #define BOUNDARY_SINGLE_CACHELINE 1
16460 #define BOUNDARY_MULTI_CACHELINE 2
16462 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16464 int cacheline_size;
16468 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16470 cacheline_size = 1024;
16472 cacheline_size = (int) byte * 4;
16474 /* On 5703 and later chips, the boundary bits have no
16477 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16478 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16479 !tg3_flag(tp, PCI_EXPRESS))
16482 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16483 goal = BOUNDARY_MULTI_CACHELINE;
16485 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16486 goal = BOUNDARY_SINGLE_CACHELINE;
16492 if (tg3_flag(tp, 57765_PLUS)) {
16493 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16500 /* PCI controllers on most RISC systems tend to disconnect
16501 * when a device tries to burst across a cache-line boundary.
16502 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16504 * Unfortunately, for PCI-E there are only limited
16505 * write-side controls for this, and thus for reads
16506 * we will still get the disconnects. We'll also waste
16507 * these PCI cycles for both read and write for chips
16508 * other than 5700 and 5701 which do not implement the
16511 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16512 switch (cacheline_size) {
16517 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16518 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16519 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16521 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16522 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16527 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16528 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16532 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16533 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16536 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16537 switch (cacheline_size) {
16541 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16542 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16543 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16549 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16550 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16554 switch (cacheline_size) {
16556 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16557 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16558 DMA_RWCTRL_WRITE_BNDRY_16);
16563 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16564 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16565 DMA_RWCTRL_WRITE_BNDRY_32);
16570 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16571 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16572 DMA_RWCTRL_WRITE_BNDRY_64);
16577 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16578 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16579 DMA_RWCTRL_WRITE_BNDRY_128);
16584 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16585 DMA_RWCTRL_WRITE_BNDRY_256);
16588 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16589 DMA_RWCTRL_WRITE_BNDRY_512);
16593 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16594 DMA_RWCTRL_WRITE_BNDRY_1024);
16603 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16604 int size, bool to_device)
16606 struct tg3_internal_buffer_desc test_desc;
16607 u32 sram_dma_descs;
16610 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
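/* Quiesce the completion FIFOs, DMA engines, buffer manager and FTQs,
 * then build a single internal buffer descriptor describing the host
 * test buffer.
 */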
16612 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16613 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16614 tw32(RDMAC_STATUS, 0);
16615 tw32(WDMAC_STATUS, 0);
16617 tw32(BUFMGR_MODE, 0);
16618 tw32(FTQ_RESET, 0);
16620 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16621 test_desc.addr_lo = buf_dma & 0xffffffff;
16622 test_desc.nic_mbuf = 0x00002100;
16623 test_desc.len = size;
16626 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16627 * the *second* time the tg3 driver was getting loaded after an
16628 * initial scan.
16630 * Broadcom tells me:
16631 * ...the DMA engine is connected to the GRC block and a DMA
16632 * reset may affect the GRC block in some unpredictable way...
16633 * The behavior of resets to individual blocks has not been tested.
16635 * Broadcom noted the GRC reset will also reset all sub-components.
16638 test_desc.cqid_sqid = (13 << 8) | 2;
16640 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16643 test_desc.cqid_sqid = (16 << 8) | 7;
16645 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16648 test_desc.flags = 0x00000005;
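/* Copy the descriptor into the NIC SRAM descriptor pool one 32-bit word
 * at a time through the PCI memory window registers.
 */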
16650 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16653 val = *(((u32 *)&test_desc) + i);
16654 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16655 sram_dma_descs + (i * sizeof(u32)));
16656 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16658 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
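/* Enqueue the descriptor on the DMA-high read (host-to-device) or write
 * (device-to-host) FTQ, then poll the matching completion FIFO for the
 * descriptor address.
 */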
16661 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16663 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16666 for (i = 0; i < 40; i++) {
16670 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16672 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16673 if ((val & 0xffff) == sram_dma_descs) {
16684 #define TEST_BUFFER_SIZE 0x2000
16686 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16687 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16691 static int tg3_test_dma(struct tg3 *tp)
16693 dma_addr_t buf_dma;
16694 u32 *buf, saved_dma_rwctrl;
16697 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16698 &buf_dma, GFP_KERNEL);
16704 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16705 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16707 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
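/* Layer bus-specific DMA read/write watermark settings on top of the
 * boundary bits chosen above; the values differ for PCIe, PCI-X and
 * plain PCI devices.
 */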
16709 if (tg3_flag(tp, 57765_PLUS))
16712 if (tg3_flag(tp, PCI_EXPRESS)) {
16713 /* DMA read watermark not used on PCIE */
16714 tp->dma_rwctrl |= 0x00180000;
16715 } else if (!tg3_flag(tp, PCIX_MODE)) {
16716 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16717 tg3_asic_rev(tp) == ASIC_REV_5750)
16718 tp->dma_rwctrl |= 0x003f0000;
16720 tp->dma_rwctrl |= 0x003f000f;
16722 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16723 tg3_asic_rev(tp) == ASIC_REV_5704) {
16724 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16725 u32 read_water = 0x7;
16727 /* If the 5704 is behind the EPB bridge, we can
16728 * do the less restrictive ONE_DMA workaround for
16729 * better performance.
16731 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16732 tg3_asic_rev(tp) == ASIC_REV_5704)
16733 tp->dma_rwctrl |= 0x8000;
16734 else if (ccval == 0x6 || ccval == 0x7)
16735 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16737 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16739 /* Set bit 23 to enable PCIX hw bug fix */
16741 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16742 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16744 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16745 /* 5780 always in PCIX mode */
16746 tp->dma_rwctrl |= 0x00144000;
16747 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16748 /* 5714 always in PCIX mode */
16749 tp->dma_rwctrl |= 0x00148000;
16751 tp->dma_rwctrl |= 0x001b000f;
16754 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16755 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16757 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16758 tg3_asic_rev(tp) == ASIC_REV_5704)
16759 tp->dma_rwctrl &= 0xfffffff0;
16761 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16762 tg3_asic_rev(tp) == ASIC_REV_5701) {
16763 /* Remove this if it causes problems for some boards. */
16764 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16766 /* On 5700/5701 chips, we need to set this bit.
16767 * Otherwise the chip will issue cacheline transactions
16768 * to streamable DMA memory with not all the byte
16769 * enables turned on. This is an error on several
16770 * RISC PCI controllers, in particular sparc64.
16772 * On 5703/5704 chips, this bit has been reassigned
16773 * a different meaning. In particular, it is used
16774 * on those chips to enable a PCI-X workaround.
16776 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16779 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16782 /* Unneeded, already done by tg3_get_invariants. */
16783 tg3_switch_clocks(tp);
16786 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16787 tg3_asic_rev(tp) != ASIC_REV_5701)
16790 /* It is best to perform the DMA test with the maximum write burst size
16791 * to expose the 5700/5701 write DMA bug.
16793 saved_dma_rwctrl = tp->dma_rwctrl;
16794 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16795 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
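/* Fill the buffer with a known pattern, DMA it to NIC SRAM, verify it on
 * the device, then DMA it back and compare.  If corruption is seen, drop
 * to a 16-byte write boundary and retry.
 */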
16800 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16803 /* Send the buffer to the chip. */
16804 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16806 dev_err(&tp->pdev->dev,
16807 "%s: Buffer write failed. err = %d\n",
16813 /* Validate that the data reached card RAM correctly. */
16814 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16816 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16817 if (le32_to_cpu(val) != p[i]) {
16818 dev_err(&tp->pdev->dev,
16819 "%s: Buffer corrupted on device! "
16820 "(%d != %d)\n", __func__, val, i);
16821 /* ret = -ENODEV here? */
16826 /* Now read it back. */
16827 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16829 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16830 "err = %d\n", __func__, ret);
16835 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16839 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16840 DMA_RWCTRL_WRITE_BNDRY_16) {
16841 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16842 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16843 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16846 dev_err(&tp->pdev->dev,
16847 "%s: Buffer corrupted on read back! "
16848 "(%d != %d)\n", __func__, p[i], i);
16854 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16860 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16861 DMA_RWCTRL_WRITE_BNDRY_16) {
16862 /* DMA test passed without adjusting DMA boundary,
16863 * now look for chipsets that are known to expose the
16864 * DMA bug without failing the test.
16866 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16867 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16868 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16870 /* Safe to use the calculated DMA boundary. */
16871 tp->dma_rwctrl = saved_dma_rwctrl;
16874 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16878 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16883 static void tg3_init_bufmgr_config(struct tg3 *tp)
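/* Pick MBUF and DMA watermarks for the buffer manager based on chip
 * family, with separate values for standard and jumbo frames.
 */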
16885 if (tg3_flag(tp, 57765_PLUS)) {
16886 tp->bufmgr_config.mbuf_read_dma_low_water =
16887 DEFAULT_MB_RDMA_LOW_WATER_5705;
16888 tp->bufmgr_config.mbuf_mac_rx_low_water =
16889 DEFAULT_MB_MACRX_LOW_WATER_57765;
16890 tp->bufmgr_config.mbuf_high_water =
16891 DEFAULT_MB_HIGH_WATER_57765;
16893 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16894 DEFAULT_MB_RDMA_LOW_WATER_5705;
16895 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16896 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16897 tp->bufmgr_config.mbuf_high_water_jumbo =
16898 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16899 } else if (tg3_flag(tp, 5705_PLUS)) {
16900 tp->bufmgr_config.mbuf_read_dma_low_water =
16901 DEFAULT_MB_RDMA_LOW_WATER_5705;
16902 tp->bufmgr_config.mbuf_mac_rx_low_water =
16903 DEFAULT_MB_MACRX_LOW_WATER_5705;
16904 tp->bufmgr_config.mbuf_high_water =
16905 DEFAULT_MB_HIGH_WATER_5705;
16906 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16907 tp->bufmgr_config.mbuf_mac_rx_low_water =
16908 DEFAULT_MB_MACRX_LOW_WATER_5906;
16909 tp->bufmgr_config.mbuf_high_water =
16910 DEFAULT_MB_HIGH_WATER_5906;
16913 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16914 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16915 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16916 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16917 tp->bufmgr_config.mbuf_high_water_jumbo =
16918 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16920 tp->bufmgr_config.mbuf_read_dma_low_water =
16921 DEFAULT_MB_RDMA_LOW_WATER;
16922 tp->bufmgr_config.mbuf_mac_rx_low_water =
16923 DEFAULT_MB_MACRX_LOW_WATER;
16924 tp->bufmgr_config.mbuf_high_water =
16925 DEFAULT_MB_HIGH_WATER;
16927 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16928 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16929 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16930 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16931 tp->bufmgr_config.mbuf_high_water_jumbo =
16932 DEFAULT_MB_HIGH_WATER_JUMBO;
16935 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16936 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16939 static char *tg3_phy_string(struct tg3 *tp)
16941 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16942 case TG3_PHY_ID_BCM5400: return "5400";
16943 case TG3_PHY_ID_BCM5401: return "5401";
16944 case TG3_PHY_ID_BCM5411: return "5411";
16945 case TG3_PHY_ID_BCM5701: return "5701";
16946 case TG3_PHY_ID_BCM5703: return "5703";
16947 case TG3_PHY_ID_BCM5704: return "5704";
16948 case TG3_PHY_ID_BCM5705: return "5705";
16949 case TG3_PHY_ID_BCM5750: return "5750";
16950 case TG3_PHY_ID_BCM5752: return "5752";
16951 case TG3_PHY_ID_BCM5714: return "5714";
16952 case TG3_PHY_ID_BCM5780: return "5780";
16953 case TG3_PHY_ID_BCM5755: return "5755";
16954 case TG3_PHY_ID_BCM5787: return "5787";
16955 case TG3_PHY_ID_BCM5784: return "5784";
16956 case TG3_PHY_ID_BCM5756: return "5722/5756";
16957 case TG3_PHY_ID_BCM5906: return "5906";
16958 case TG3_PHY_ID_BCM5761: return "5761";
16959 case TG3_PHY_ID_BCM5718C: return "5718C";
16960 case TG3_PHY_ID_BCM5718S: return "5718S";
16961 case TG3_PHY_ID_BCM57765: return "57765";
16962 case TG3_PHY_ID_BCM5719C: return "5719C";
16963 case TG3_PHY_ID_BCM5720C: return "5720C";
16964 case TG3_PHY_ID_BCM5762: return "5762C";
16965 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16966 case 0: return "serdes";
16967 default: return "unknown";
16971 static char *tg3_bus_string(struct tg3 *tp, char *str)
16973 if (tg3_flag(tp, PCI_EXPRESS)) {
16974 strcpy(str, "PCI Express");
16976 } else if (tg3_flag(tp, PCIX_MODE)) {
16977 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16979 strcpy(str, "PCIX:");
16981 if ((clock_ctrl == 7) ||
16982 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16983 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16984 strcat(str, "133MHz");
16985 else if (clock_ctrl == 0)
16986 strcat(str, "33MHz");
16987 else if (clock_ctrl == 2)
16988 strcat(str, "50MHz");
16989 else if (clock_ctrl == 4)
16990 strcat(str, "66MHz");
16991 else if (clock_ctrl == 6)
16992 strcat(str, "100MHz");
16994 strcpy(str, "PCI:");
16995 if (tg3_flag(tp, PCI_HIGH_SPEED))
16996 strcat(str, "66MHz");
16998 strcat(str, "33MHz");
17000 if (tg3_flag(tp, PCI_32BIT))
17001 strcat(str, ":32-bit");
17003 strcat(str, ":64-bit");
17007 static void tg3_init_coal(struct tg3 *tp)
17009 struct ethtool_coalesce *ec = &tp->coal;
17011 memset(ec, 0, sizeof(*ec));
17012 ec->cmd = ETHTOOL_GCOALESCE;
17013 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17014 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17015 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17016 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17017 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17018 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17019 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17020 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17021 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17023 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17024 HOSTCC_MODE_CLRTICK_TXBD)) {
17025 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17026 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17027 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17028 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17031 if (tg3_flag(tp, 5705_PLUS)) {
17032 ec->rx_coalesce_usecs_irq = 0;
17033 ec->tx_coalesce_usecs_irq = 0;
17034 ec->stats_block_coalesce_usecs = 0;
17038 static int tg3_init_one(struct pci_dev *pdev,
17039 const struct pci_device_id *ent)
17041 struct net_device *dev;
17043 int i, err, pm_cap;
17044 u32 sndmbx, rcvmbx, intmbx;
17046 u64 dma_mask, persist_dma_mask;
17047 netdev_features_t features = 0;
17049 printk_once(KERN_INFO "%s\n", version);
17051 err = pci_enable_device(pdev);
17053 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17057 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17059 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17060 goto err_out_disable_pdev;
17063 pci_set_master(pdev);
17065 /* Find power-management capability. */
17066 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17068 dev_err(&pdev->dev,
17069 "Cannot find Power Management capability, aborting\n");
17071 goto err_out_free_res;
17074 err = pci_set_power_state(pdev, PCI_D0);
17076 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17077 goto err_out_free_res;
17080 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17083 goto err_out_power_down;
17086 SET_NETDEV_DEV(dev, &pdev->dev);
17088 tp = netdev_priv(dev);
17091 tp->pm_cap = pm_cap;
17092 tp->rx_mode = TG3_DEF_RX_MODE;
17093 tp->tx_mode = TG3_DEF_TX_MODE;
17097 tp->msg_enable = tg3_debug;
17099 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17101 if (pdev_is_ssb_gige_core(pdev)) {
17102 tg3_flag_set(tp, IS_SSB_CORE);
17103 if (ssb_gige_must_flush_posted_writes(pdev))
17104 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17105 if (ssb_gige_one_dma_at_once(pdev))
17106 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17107 if (ssb_gige_have_roboswitch(pdev))
17108 tg3_flag_set(tp, ROBOSWITCH);
17109 if (ssb_gige_is_rgmii(pdev))
17110 tg3_flag_set(tp, RGMII_MODE);
17113 /* The word/byte swap controls here control register access byte
17114 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17117 tp->misc_host_ctrl =
17118 MISC_HOST_CTRL_MASK_PCI_INT |
17119 MISC_HOST_CTRL_WORD_SWAP |
17120 MISC_HOST_CTRL_INDIR_ACCESS |
17121 MISC_HOST_CTRL_PCISTATE_RW;
17123 /* The NONFRM (non-frame) byte/word swap controls take effect
17124 * on descriptor entries, anything which isn't packet data.
17126 * The StrongARM chips on the board (one for tx, one for rx)
17127 * are running in big-endian mode.
17129 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17130 GRC_MODE_WSWAP_NONFRM_DATA);
17131 #ifdef __BIG_ENDIAN
17132 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17134 spin_lock_init(&tp->lock);
17135 spin_lock_init(&tp->indirect_lock);
17136 INIT_WORK(&tp->reset_task, tg3_reset_task);
17138 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17140 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17142 goto err_out_free_dev;
17145 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17146 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17147 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17148 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17149 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17150 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17151 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17152 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17157 tg3_flag_set(tp, ENABLE_APE);
17158 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17159 if (!tp->aperegs) {
17160 dev_err(&pdev->dev,
17161 "Cannot map APE registers, aborting\n");
17163 goto err_out_iounmap;
17167 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17168 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17170 dev->ethtool_ops = &tg3_ethtool_ops;
17171 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17172 dev->netdev_ops = &tg3_netdev_ops;
17173 dev->irq = pdev->irq;
17175 err = tg3_get_invariants(tp, ent);
17177 dev_err(&pdev->dev,
17178 "Problem fetching invariants of chip, aborting\n");
17179 goto err_out_apeunmap;
17182 /* The EPB bridge inside 5714, 5715, and 5780 and any
17183 * device behind the EPB cannot support DMA addresses > 40-bit.
17184 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17185 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17186 * do DMA address check in tg3_start_xmit().
17188 if (tg3_flag(tp, IS_5788))
17189 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17190 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17191 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17192 #ifdef CONFIG_HIGHMEM
17193 dma_mask = DMA_BIT_MASK(64);
17196 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17198 /* Configure DMA attributes. */
17199 if (dma_mask > DMA_BIT_MASK(32)) {
17200 err = pci_set_dma_mask(pdev, dma_mask);
17202 features |= NETIF_F_HIGHDMA;
17203 err = pci_set_consistent_dma_mask(pdev,
17206 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17207 "DMA for consistent allocations\n");
17208 goto err_out_apeunmap;
17212 if (err || dma_mask == DMA_BIT_MASK(32)) {
17213 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17215 dev_err(&pdev->dev,
17216 "No usable DMA configuration, aborting\n");
17217 goto err_out_apeunmap;
17221 tg3_init_bufmgr_config(tp);
17223 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17225 /* 5700 B0 chips do not support checksumming correctly due
17226 * to hardware bugs.
17228 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17229 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17231 if (tg3_flag(tp, 5755_PLUS))
17232 features |= NETIF_F_IPV6_CSUM;
17235 /* TSO is on by default on chips that support hardware TSO.
17236 * Firmware TSO on older chips gives lower performance, so it
17237 * is off by default, but can be enabled using ethtool.
17239 if ((tg3_flag(tp, HW_TSO_1) ||
17240 tg3_flag(tp, HW_TSO_2) ||
17241 tg3_flag(tp, HW_TSO_3)) &&
17242 (features & NETIF_F_IP_CSUM))
17243 features |= NETIF_F_TSO;
17244 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17245 if (features & NETIF_F_IPV6_CSUM)
17246 features |= NETIF_F_TSO6;
17247 if (tg3_flag(tp, HW_TSO_3) ||
17248 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17249 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17250 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17251 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17252 tg3_asic_rev(tp) == ASIC_REV_57780)
17253 features |= NETIF_F_TSO_ECN;
17256 dev->features |= features;
17257 dev->vlan_features |= features;
17260 * Add loopback capability only for a subset of devices that support
17261 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17262 * loopback for the remaining devices.
17264 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17265 !tg3_flag(tp, CPMU_PRESENT))
17266 /* Add the loopback capability */
17267 features |= NETIF_F_LOOPBACK;
17269 dev->hw_features |= features;
17271 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17272 !tg3_flag(tp, TSO_CAPABLE) &&
17273 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17274 tg3_flag_set(tp, MAX_RXPEND_64);
17275 tp->rx_pending = 63;
17278 err = tg3_get_device_address(tp);
17280 dev_err(&pdev->dev,
17281 "Could not obtain valid ethernet address, aborting\n");
17282 goto err_out_apeunmap;
17286 * Reset the chip in case the UNDI or EFI driver did not shut it down.
17287 * The DMA self test will enable the WDMAC and we'll see (spurious)
17288 * pending DMA on the PCI bus at that point.
17290 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17291 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17292 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17296 err = tg3_test_dma(tp);
17298 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17299 goto err_out_apeunmap;
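/* Assign interrupt, receive-return and send producer mailbox addresses
 * for each NAPI context; vector 0's values also serve INTx/MSI operation.
 */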
17302 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17303 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17304 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17305 for (i = 0; i < tp->irq_max; i++) {
17306 struct tg3_napi *tnapi = &tp->napi[i];
17309 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17311 tnapi->int_mbox = intmbx;
17317 tnapi->consmbox = rcvmbx;
17318 tnapi->prodmbox = sndmbx;
17321 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17323 tnapi->coal_now = HOSTCC_MODE_NOW;
17325 if (!tg3_flag(tp, SUPPORT_MSIX))
17329 * If we support MSIX, we'll be using RSS. If we're using
17330 * RSS, the first vector only handles link interrupts and the
17331 * remaining vectors handle rx and tx interrupts. Reuse the
17332 * mailbox values for the next iteration. The values we setup
17333 * above are still useful for the single vectored mode.
17348 pci_set_drvdata(pdev, dev);
17350 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17351 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17352 tg3_asic_rev(tp) == ASIC_REV_5762)
17353 tg3_flag_set(tp, PTP_CAPABLE);
17355 if (tg3_flag(tp, 5717_PLUS)) {
17356 /* Resume a low-power mode */
17357 tg3_frob_aux_power(tp, false);
17360 tg3_timer_init(tp);
17362 tg3_carrier_off(tp);
17364 err = register_netdev(dev);
17366 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17367 goto err_out_apeunmap;
17370 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17371 tp->board_part_number,
17372 tg3_chip_rev_id(tp),
17373 tg3_bus_string(tp, str),
17376 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17377 struct phy_device *phydev;
17378 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17380 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17381 phydev->drv->name, dev_name(&phydev->dev));
17385 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17386 ethtype = "10/100Base-TX";
17387 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17388 ethtype = "1000Base-SX";
17390 ethtype = "10/100/1000Base-T";
17392 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17393 "(WireSpeed[%d], EEE[%d])\n",
17394 tg3_phy_string(tp), ethtype,
17395 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17396 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17399 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17400 (dev->features & NETIF_F_RXCSUM) != 0,
17401 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17402 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17403 tg3_flag(tp, ENABLE_ASF) != 0,
17404 tg3_flag(tp, TSO_CAPABLE) != 0);
17405 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17407 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17408 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17410 pci_save_state(pdev);
17416 iounmap(tp->aperegs);
17417 tp->aperegs = NULL;
17429 err_out_power_down:
17430 pci_set_power_state(pdev, PCI_D3hot);
17433 pci_release_regions(pdev);
17435 err_out_disable_pdev:
17436 pci_disable_device(pdev);
17437 pci_set_drvdata(pdev, NULL);
17441 static void tg3_remove_one(struct pci_dev *pdev)
17443 struct net_device *dev = pci_get_drvdata(pdev);
17446 struct tg3 *tp = netdev_priv(dev);
17448 release_firmware(tp->fw);
17450 tg3_reset_task_cancel(tp);
17452 if (tg3_flag(tp, USE_PHYLIB)) {
17457 unregister_netdev(dev);
17459 iounmap(tp->aperegs);
17460 tp->aperegs = NULL;
17467 pci_release_regions(pdev);
17468 pci_disable_device(pdev);
17469 pci_set_drvdata(pdev, NULL);
17473 #ifdef CONFIG_PM_SLEEP
17474 static int tg3_suspend(struct device *device)
17476 struct pci_dev *pdev = to_pci_dev(device);
17477 struct net_device *dev = pci_get_drvdata(pdev);
17478 struct tg3 *tp = netdev_priv(dev);
17481 if (!netif_running(dev))
17484 tg3_reset_task_cancel(tp);
17486 tg3_netif_stop(tp);
17488 tg3_timer_stop(tp);
17490 tg3_full_lock(tp, 1);
17491 tg3_disable_ints(tp);
17492 tg3_full_unlock(tp);
17494 netif_device_detach(dev);
17496 tg3_full_lock(tp, 0);
17497 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17498 tg3_flag_clear(tp, INIT_COMPLETE);
17499 tg3_full_unlock(tp);
17501 err = tg3_power_down_prepare(tp);
17505 tg3_full_lock(tp, 0);
17507 tg3_flag_set(tp, INIT_COMPLETE);
17508 err2 = tg3_restart_hw(tp, true);
17512 tg3_timer_start(tp);
17514 netif_device_attach(dev);
17515 tg3_netif_start(tp);
17518 tg3_full_unlock(tp);
17527 static int tg3_resume(struct device *device)
17529 struct pci_dev *pdev = to_pci_dev(device);
17530 struct net_device *dev = pci_get_drvdata(pdev);
17531 struct tg3 *tp = netdev_priv(dev);
17534 if (!netif_running(dev))
17537 netif_device_attach(dev);
17539 tg3_full_lock(tp, 0);
17541 tg3_flag_set(tp, INIT_COMPLETE);
17542 err = tg3_restart_hw(tp,
17543 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17547 tg3_timer_start(tp);
17549 tg3_netif_start(tp);
17552 tg3_full_unlock(tp);
17559 #endif /* CONFIG_PM_SLEEP */
17561 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17564 * tg3_io_error_detected - called when PCI error is detected
17565 * @pdev: Pointer to PCI device
17566 * @state: The current pci connection state
17568 * This function is called after a PCI bus error affecting
17569 * this device has been detected.
17571 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17572 pci_channel_state_t state)
17574 struct net_device *netdev = pci_get_drvdata(pdev);
17575 struct tg3 *tp = netdev_priv(netdev);
17576 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17578 netdev_info(netdev, "PCI I/O error detected\n");
17582 if (!netif_running(netdev))
17587 tg3_netif_stop(tp);
17589 tg3_timer_stop(tp);
17591 /* Want to make sure that the reset task doesn't run */
17592 tg3_reset_task_cancel(tp);
17594 netif_device_detach(netdev);
17596 /* Clean up software state, even if MMIO is blocked */
17597 tg3_full_lock(tp, 0);
17598 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17599 tg3_full_unlock(tp);
17602 if (state == pci_channel_io_perm_failure)
17603 err = PCI_ERS_RESULT_DISCONNECT;
17605 pci_disable_device(pdev);
17613 * tg3_io_slot_reset - called after the pci bus has been reset.
17614 * @pdev: Pointer to PCI device
17616 * Restart the card from scratch, as if from a cold-boot.
17617 * At this point, the card has experienced a hard reset,
17618 * followed by fixups by BIOS, and has its config space
17619 * set up identically to what it was at cold boot.
17621 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17623 struct net_device *netdev = pci_get_drvdata(pdev);
17624 struct tg3 *tp = netdev_priv(netdev);
17625 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17630 if (pci_enable_device(pdev)) {
17631 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17635 pci_set_master(pdev);
17636 pci_restore_state(pdev);
17637 pci_save_state(pdev);
17639 if (!netif_running(netdev)) {
17640 rc = PCI_ERS_RESULT_RECOVERED;
17644 err = tg3_power_up(tp);
17648 rc = PCI_ERS_RESULT_RECOVERED;
17657 * tg3_io_resume - called when traffic can start flowing again.
17658 * @pdev: Pointer to PCI device
17660 * This callback is called when the error recovery driver tells
17661 * us that it's OK to resume normal operation.
17663 static void tg3_io_resume(struct pci_dev *pdev)
17665 struct net_device *netdev = pci_get_drvdata(pdev);
17666 struct tg3 *tp = netdev_priv(netdev);
17671 if (!netif_running(netdev))
17674 tg3_full_lock(tp, 0);
17675 tg3_flag_set(tp, INIT_COMPLETE);
17676 err = tg3_restart_hw(tp, true);
17678 tg3_full_unlock(tp);
17679 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17683 netif_device_attach(netdev);
17685 tg3_timer_start(tp);
17687 tg3_netif_start(tp);
17689 tg3_full_unlock(tp);
17697 static const struct pci_error_handlers tg3_err_handler = {
17698 .error_detected = tg3_io_error_detected,
17699 .slot_reset = tg3_io_slot_reset,
17700 .resume = tg3_io_resume
17703 static struct pci_driver tg3_driver = {
17704 .name = DRV_MODULE_NAME,
17705 .id_table = tg3_pci_tbl,
17706 .probe = tg3_init_one,
17707 .remove = tg3_remove_one,
17708 .err_handler = &tg3_err_handler,
17709 .driver.pm = &tg3_pm_ops,
17712 static int __init tg3_init(void)
17714 return pci_register_driver(&tg3_driver);
17717 static void __exit tg3_cleanup(void)
17719 pci_unregister_driver(&tg3_driver);
17722 module_init(tg3_init);
17723 module_exit(tg3_cleanup);