7b71387cf93cfe6ed34e1baf0cdc9ff21a81e1d1
[linux-2.6.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Return nonzero if @flag is set in the device's flag bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
72
/* Set @flag in the device's flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Clear @flag in the device's flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     123
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "March 21, 2012"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208
209 #define FIRMWARE_TG3            "tigon/tg3.bin"
210 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
211 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
212
213 static char version[] __devinitdata =
214         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215
216 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
217 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
218 MODULE_LICENSE("GPL");
219 MODULE_VERSION(DRV_MODULE_VERSION);
220 MODULE_FIRMWARE(FIRMWARE_TG3);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
222 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223
224 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
225 module_param(tg3_debug, int, 0);
226 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227
/* PCI IDs this driver claims: the Broadcom Tigon3 family plus
 * SysKonnect, Altima, Apple and one Fujitsu board built around it.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* terminating entry */
};
312
313 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314
/* Names reported via ethtool -S.  NOTE(review): the entry order appears
 * to mirror the layout of the hardware statistics block this driver
 * reads — do not reorder without checking the stats-gathering code.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
397
398 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
399
400
/* Names reported via ethtool self-test.  NOTE(review): order must match
 * the test-dispatch code elsewhere in this file — do not reorder.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
413
414 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
415
416
417 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 {
419         writel(val, tp->regs + off);
420 }
421
422 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 {
424         return readl(tp->regs + off);
425 }
426
427 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 {
429         writel(val, tp->aperegs + off);
430 }
431
432 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 {
434         return readl(tp->aperegs + off);
435 }
436
437 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
438 {
439         unsigned long flags;
440
441         spin_lock_irqsave(&tp->indirect_lock, flags);
442         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
443         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
444         spin_unlock_irqrestore(&tp->indirect_lock, flags);
445 }
446
447 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 {
449         writel(val, tp->regs + off);
450         readl(tp->regs + off);
451 }
452
453 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
454 {
455         unsigned long flags;
456         u32 val;
457
458         spin_lock_irqsave(&tp->indirect_lock, flags);
459         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
460         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
461         spin_unlock_irqrestore(&tp->indirect_lock, flags);
462         return val;
463 }
464
/* Write @val to mailbox @off when direct MMIO access is unavailable.
 * The RX return-ring and standard producer-index mailboxes have
 * dedicated shadow registers in PCI config space; all other mailboxes
 * go through the indirect register window at offset 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		/* Fast path: config-space shadow of the RX return ring
		 * consumer index — no lock needed.
		 */
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		/* Fast path: config-space shadow of the std ring
		 * producer index.
		 */
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
494
495 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
496 {
497         unsigned long flags;
498         u32 val;
499
500         spin_lock_irqsave(&tp->indirect_lock, flags);
501         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
502         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
503         spin_unlock_irqrestore(&tp->indirect_lock, flags);
504         return val;
505 }
506
507 /* usec_wait specifies the wait time in usec when writing to certain registers
508  * where it is unsafe to read back the register without some delay.
509  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
510  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods: the configured write32 hook already
		 * guarantees completion, so no read-back is required.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write directly, optionally delay, then
		 * read back to force the posted write out to the device.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
530
531 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 {
533         tp->write32_mbox(tp, off, val);
534         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
535                 tp->read32_mbox(tp, off);
536 }
537
/* Ring a TX mailbox doorbell.  Chips with TXD_MBOX_HWBUG need the
 * value written twice; chips with MBOX_WRITE_REORDER need a read-back
 * so the doorbell is not reordered behind later MMIO.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* hw bug workaround: repeat the write */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* flush to enforce ordering */
}
547
548 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 {
550         return readl(tp->regs + off + GRCMBOX_BASE);
551 }
552
553 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 {
555         writel(val, tp->regs + off + GRCMBOX_BASE);
556 }
557
558 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
559 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
560 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
561 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
562 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
563
564 #define tw32(reg, val)                  tp->write32(tp, reg, val)
565 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
566 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
567 #define tr32(reg)                       tp->read32(tp, reg)
568
/* Write @val into NIC SRAM at offset @off through the memory window.
 * On 5906 the stats-block range is skipped entirely (mirrors the check
 * in tg3_read_mem()).  The window can be driven either via PCI config
 * space (SRAM_USE_CONFIG) or via flushed MMIO writes.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
593
/* Read a u32 from NIC SRAM at offset @off into *@val through the
 * memory window.  On 5906 the stats-block range is not accessed and
 * *@val is reported as 0 (mirrors the check in tg3_write_mem()).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
620
/* Release any APE locks a previous incarnation of the driver may still
 * hold, so we start from a clean slate.  5761 uses the legacy grant
 * register block; later chips use the per-function block.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			/* PHY locks are always released with the driver bit */
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			/* Other locks are granted per PCI function; function 0
			 * uses the driver bit.
			 */
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
650
/* Acquire APE lock @locknum for this PCI function.
 * Returns 0 on success (or when APE is absent / lock not applicable),
 * -EBUSY if the grant is not observed within ~1 ms, and -EINVAL for an
 * unknown lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests with the driver bit, others with
		 * their per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 has a single request/grant block; later chips have
	 * per-function blocks.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
703
/* Release APE lock @locknum previously taken by tg3_ape_lock().
 * Writing our bit to the grant register drops the lock.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must match the bit used by tg3_ape_lock() */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
733
/* Post @event to the APE firmware.  Best effort: bails out silently if
 * the APE is absent, not ready, or still busy with a previous event
 * after ~1 ms of polling.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Verify the APE shared-memory segment is present... */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* ...and the firmware is up. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Queue our event as soon as no event is pending. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event was actually queued above. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
773
/* Tell the APE management firmware that the host driver's state has
 * changed (init / shutdown / suspend).
 *
 * RESET_KIND_INIT publishes the host segment signature/length, driver
 * ID and behavior flags, bumps the init counter, and marks the driver
 * state as started.  RESET_KIND_SHUTDOWN clears the host segment
 * signature and records either WoL or plain-unload state depending on
 * wakeup configuration.  Unknown kinds are ignored.  No-op when the
 * APE is not enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Count driver (re)initializations for the firmware. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
830
831 static void tg3_disable_ints(struct tg3 *tp)
832 {
833         int i;
834
835         tw32(TG3PCI_MISC_HOST_CTRL,
836              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
837         for (i = 0; i < tp->irq_max; i++)
838                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
839 }
840
/* Unmask the chip's PCI interrupt and re-arm every active interrupt
 * vector by writing its last_tag into the vector's mailbox.  Finally
 * either force an initial interrupt (legacy status mode with an update
 * already pending) or kick the host coalescing engine so pending work
 * gets delivered.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* irq_sync clear must be visible before unmasking */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the mailbox is written twice in 1-shot
		 * MSI mode -- looks like a hardware workaround; confirm.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
871
872 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
873 {
874         struct tg3 *tp = tnapi->tp;
875         struct tg3_hw_status *sblk = tnapi->hw_status;
876         unsigned int work_exists = 0;
877
878         /* check for phy events */
879         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
880                 if (sblk->status & SD_STATUS_LINK_CHG)
881                         work_exists = 1;
882         }
883         /* check for RX/TX work to do */
884         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
885             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
886                 work_exists = 1;
887
888         return work_exists;
889 }
890
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector; last_tag tells the chip how much work the
	 * driver has already processed.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
911
/* Rewrite TG3PCI_CLOCK_CTRL, keeping only the CLKRUN control bits and
 * the low 5 bits of the current value.  When a fast core clock was
 * selected (625MHz on 5705+ parts, 44MHz otherwise), the register is
 * stepped through intermediate values (with ALTCLK where required)
 * before the final value is written, as the hardware cannot be
 * switched in one step.  No-op on CPMU-equipped and 5780-class
 * devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: keep the 44MHz core bit while
		 * enabling ALTCLK, then drop the core bit.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
944
945 #define PHY_BUSY_LOOPS  5000
946
/* Read 16-bit PHY register @reg through the MAC's MII management port.
 *
 * MII auto-polling is suspended for the duration of the transaction
 * and restored afterwards.  On success the value is stored in *val and
 * 0 is returned; -EBUSY is returned if the port stays busy for
 * PHY_BUSY_LOOPS polls (10us each).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling owns the MI port; pause it while we use it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build and launch the MI read frame. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy cleared; re-read after a short settle
			 * delay to pick up the final data value.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
995
/* Write 16-bit value @val to PHY register @reg through the MAC's MII
 * management port.
 *
 * Writes to MII_CTRL1000 and MII_TG3_AUX_CTRL on FET-class PHYs are
 * silently reported as successful without touching the hardware.  MII
 * auto-polling is suspended for the duration and restored afterwards.
 * Returns 0 on success, -EBUSY if the port stays busy for
 * PHY_BUSY_LOOPS polls (10us each).
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling owns the MI port; pause it while we use it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build and launch the MI write frame. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1044
1045 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1046 {
1047         int err;
1048
1049         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1050         if (err)
1051                 goto done;
1052
1053         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1054         if (err)
1055                 goto done;
1056
1057         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1058                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1059         if (err)
1060                 goto done;
1061
1062         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1063
1064 done:
1065         return err;
1066 }
1067
1068 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1069 {
1070         int err;
1071
1072         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1073         if (err)
1074                 goto done;
1075
1076         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1077         if (err)
1078                 goto done;
1079
1080         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1081                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1082         if (err)
1083                 goto done;
1084
1085         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1086
1087 done:
1088         return err;
1089 }
1090
1091 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1092 {
1093         int err;
1094
1095         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1096         if (!err)
1097                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1098
1099         return err;
1100 }
1101
1102 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1103 {
1104         int err;
1105
1106         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1107         if (!err)
1108                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1109
1110         return err;
1111 }
1112
1113 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1114 {
1115         int err;
1116
1117         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1118                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1119                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1120         if (!err)
1121                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1122
1123         return err;
1124 }
1125
1126 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1127 {
1128         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1129                 set |= MII_TG3_AUXCTL_MISC_WREN;
1130
1131         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1132 }
1133
/* Enable/disable SMDSP access through the aux-control shadow register.
 * Both macros expand to a tg3_phy_auxctl_write() call (an int
 * expression) so they can be used as ordinary statements.
 *
 * Fix: the DISABLE variant carried a trailing semicolon in its
 * expansion, making "if (x) TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); else"
 * a syntax error and leaving the two macros inconsistent.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1142
/* Soft-reset the PHY: set BMCR_RESET and poll (up to 5000 iterations,
 * 10us apart) until the PHY clears the bit.  Returns 0 on success,
 * -EBUSY on an MDIO error or if the reset never completes.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			/* Reset complete; brief settle delay. */
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1173
1174 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1175 {
1176         struct tg3 *tp = bp->priv;
1177         u32 val;
1178
1179         spin_lock_bh(&tp->lock);
1180
1181         if (tg3_readphy(tp, reg, &val))
1182                 val = -EIO;
1183
1184         spin_unlock_bh(&tp->lock);
1185
1186         return val;
1187 }
1188
1189 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1190 {
1191         struct tg3 *tp = bp->priv;
1192         u32 ret = 0;
1193
1194         spin_lock_bh(&tp->lock);
1195
1196         if (tg3_writephy(tp, reg, val))
1197                 ret = -EIO;
1198
1199         spin_unlock_bh(&tp->lock);
1200
1201         return ret;
1202 }
1203
/* phylib bus-reset hook; the tg3 MDIO bus needs no reset, so this is
 * an always-successful stub.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1208
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY.
 *
 * Recognized PHYs get their LED modes set; PHYs not in the switch
 * below are left untouched.  For non-RGMII attachments only the LED
 * modes and clock timeouts are programmed.  For RGMII, in-band status
 * signalling and the optional external RX-decode / TX-status paths are
 * additionally configured from the RGMII_* driver flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII attachment: LED modes and timeouts only. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the RX/TX in-band settings in the ext RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1289
/* Take ownership of the MDIO interface by disabling the MAC's MII
 * auto-polling, then reapply the 5785 PHY-interface configuration if
 * the MDIO bus has already been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1300
/* Determine the PHY address, start the MDIO interface and, when phylib
 * is in use, allocate and register an mii_bus for this device.
 *
 * On 5717+ parts the PHY address is derived from the PCI function
 * number (offset by 7 for serdes configurations); everything else uses
 * the fixed TG3_PHY_MII_ADDR.  After registration the attached PHY is
 * identified and its interface mode and driver flags are configured.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do unless phylib is used and the bus is not
	 * already registered.
	 */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Configure interface mode and workaround flags per PHY type. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1405
1406 static void tg3_mdio_fini(struct tg3 *tp)
1407 {
1408         if (tg3_flag(tp, MDIOBUS_INITED)) {
1409                 tg3_flag_clear(tp, MDIOBUS_INITED);
1410                 mdiobus_unregister(tp->mdio_bus);
1411                 mdiobus_free(tp->mdio_bus);
1412         }
1413 }
1414
/* tp->lock is held. */
/* Ring the firmware doorbell: set the driver-event bit in
 * GRC_RX_CPU_EVENT and timestamp it so tg3_wait_for_event_ack() can
 * bound its wait for the firmware's acknowledgement.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1426
1427 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1428
/* tp->lock is held. */
/* Pace driver->firmware events: wait until the firmware clears
 * GRC_RX_CPU_DRIVER_EVENT (acknowledging the previous event), bounded
 * by TG3_FW_EVENT_TIMEOUT_USEC measured from last_event_jiffies.
 * Polls in ~8us steps.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert the remaining microseconds into 8us poll slots. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1455
/* tp->lock is held. */
/* Snapshot negotiation-related PHY registers into four packed words:
 *   data[0] = BMCR << 16      | BMSR
 *   data[1] = ADVERTISE << 16 | LPA
 *   data[2] = CTRL1000 << 16  | STAT1000  (0 for MII-serdes PHYs)
 *   data[3] = PHYADDR << 16
 * A register that fails to read contributes 0 to its word.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1490
/* tp->lock is held. */
/* Push a link-status update (FWCMD_NICDRV_LINK_UPDATE plus a 14-byte
 * payload of packed PHY state) to the management firmware through the
 * NIC SRAM command mailbox.  Only applies to 5780-class devices
 * running ASF.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	/* Make sure the firmware has consumed any previous event. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1512
1513 /* tp->lock is held. */
1514 static void tg3_stop_fw(struct tg3 *tp)
1515 {
1516         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1517                 /* Wait for RX cpu to ACK the previous event. */
1518                 tg3_wait_for_event_ack(tp);
1519
1520                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1521
1522                 tg3_generate_fw_event(tp);
1523
1524                 /* Wait for RX cpu to ACK this event. */
1525                 tg3_wait_for_event_ack(tp);
1526         }
1527 }
1528
1529 /* tp->lock is held. */
1530 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1531 {
1532         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1533                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1534
1535         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1536                 switch (kind) {
1537                 case RESET_KIND_INIT:
1538                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1539                                       DRV_STATE_START);
1540                         break;
1541
1542                 case RESET_KIND_SHUTDOWN:
1543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544                                       DRV_STATE_UNLOAD);
1545                         break;
1546
1547                 case RESET_KIND_SUSPEND:
1548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1549                                       DRV_STATE_SUSPEND);
1550                         break;
1551
1552                 default:
1553                         break;
1554                 }
1555         }
1556
1557         if (kind == RESET_KIND_INIT ||
1558             kind == RESET_KIND_SUSPEND)
1559                 tg3_ape_driver_state_change(tp, kind);
1560 }
1561
1562 /* tp->lock is held. */
1563 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1564 {
1565         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1566                 switch (kind) {
1567                 case RESET_KIND_INIT:
1568                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1569                                       DRV_STATE_START_DONE);
1570                         break;
1571
1572                 case RESET_KIND_SHUTDOWN:
1573                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574                                       DRV_STATE_UNLOAD_DONE);
1575                         break;
1576
1577                 default:
1578                         break;
1579                 }
1580         }
1581
1582         if (kind == RESET_KIND_SHUTDOWN)
1583                 tg3_ape_driver_state_change(tp, kind);
1584 }
1585
1586 /* tp->lock is held. */
1587 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1588 {
1589         if (tg3_flag(tp, ENABLE_ASF)) {
1590                 switch (kind) {
1591                 case RESET_KIND_INIT:
1592                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1593                                       DRV_STATE_START);
1594                         break;
1595
1596                 case RESET_KIND_SHUTDOWN:
1597                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1598                                       DRV_STATE_UNLOAD);
1599                         break;
1600
1601                 case RESET_KIND_SUSPEND:
1602                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1603                                       DRV_STATE_SUSPEND);
1604                         break;
1605
1606                 default:
1607                         break;
1608                 }
1609         }
1610 }
1611
/* Wait for the bootcode firmware to finish initializing.
 *
 * 5906 parts signal completion through VCPU_STATUS (polled up to
 * 20ms; -ENODEV on timeout).  All other parts flip the firmware
 * mailbox magic value (polled up to ~1s); a timeout there is NOT an
 * error, since some boards ship without firmware, but it is logged
 * once.  Returns 0 except for the 5906 timeout case.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1655
/* Log the current link state (speed, duplex, flow control, and EEE
 * when supported) to the kernel log, and forward the link status to
 * the management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1683
1684 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1685 {
1686         u16 miireg;
1687
1688         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1689                 miireg = ADVERTISE_1000XPAUSE;
1690         else if (flow_ctrl & FLOW_CTRL_TX)
1691                 miireg = ADVERTISE_1000XPSE_ASYM;
1692         else if (flow_ctrl & FLOW_CTRL_RX)
1693                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1694         else
1695                 miireg = 0;
1696
1697         return miireg;
1698 }
1699
1700 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1701 {
1702         u8 cap = 0;
1703
1704         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1705                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1706         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1707                 if (lcladv & ADVERTISE_1000XPAUSE)
1708                         cap = FLOW_CTRL_RX;
1709                 if (rmtadv & ADVERTISE_1000XPAUSE)
1710                         cap = FLOW_CTRL_TX;
1711         }
1712
1713         return cap;
1714 }
1715
1716 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1717 {
1718         u8 autoneg;
1719         u8 flowctrl = 0;
1720         u32 old_rx_mode = tp->rx_mode;
1721         u32 old_tx_mode = tp->tx_mode;
1722
1723         if (tg3_flag(tp, USE_PHYLIB))
1724                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1725         else
1726                 autoneg = tp->link_config.autoneg;
1727
1728         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1729                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1730                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1731                 else
1732                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1733         } else
1734                 flowctrl = tp->link_config.flowctrl;
1735
1736         tp->link_config.active_flowctrl = flowctrl;
1737
1738         if (flowctrl & FLOW_CTRL_RX)
1739                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1740         else
1741                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1742
1743         if (old_rx_mode != tp->rx_mode)
1744                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1745
1746         if (flowctrl & FLOW_CTRL_TX)
1747                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1748         else
1749                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1750
1751         if (old_tx_mode != tp->tx_mode)
1752                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1753 }
1754
/* phylib link-change callback (registered through phy_connect() in
 * tg3_phy_init()).  Syncs the MAC's port mode, duplex, slot time and
 * flow-control settings with the PHY's current state, then logs the
 * change if anything user-visible differs.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only hit the MAC mode register when the value changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs an extended slot time (0xff); all other
	 * modes use the default of 32.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Log only when link state, speed, duplex or flow control
	 * actually changed.
	 */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report after dropping the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1838
/* Connect the MAC to its PHY through phylib and restrict the
 * advertised feature set to what the MAC supports.  Idempotent:
 * returns 0 immediately when already connected.
 *
 * Returns 0 on success, PTR_ERR() from phy_connect() on attach
 * failure, or -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices get the basic MII feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unknown interface mode: undo the attach and fail. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1886
1887 static void tg3_phy_start(struct tg3 *tp)
1888 {
1889         struct phy_device *phydev;
1890
1891         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1892                 return;
1893
1894         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1895
1896         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1897                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1898                 phydev->speed = tp->link_config.speed;
1899                 phydev->duplex = tp->link_config.duplex;
1900                 phydev->autoneg = tp->link_config.autoneg;
1901                 phydev->advertising = tp->link_config.advertising;
1902         }
1903
1904         phy_start(phydev);
1905
1906         phy_start_aneg(phydev);
1907 }
1908
1909 static void tg3_phy_stop(struct tg3 *tp)
1910 {
1911         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1912                 return;
1913
1914         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1915 }
1916
1917 static void tg3_phy_fini(struct tg3 *tp)
1918 {
1919         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1920                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1922         }
1923 }
1924
1925 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1926 {
1927         int err;
1928         u32 val;
1929
1930         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1931                 return 0;
1932
1933         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1934                 /* Cannot do read-modify-write on 5401 */
1935                 err = tg3_phy_auxctl_write(tp,
1936                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1937                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1938                                            0x4c20);
1939                 goto done;
1940         }
1941
1942         err = tg3_phy_auxctl_read(tp,
1943                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1944         if (err)
1945                 return err;
1946
1947         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1948         err = tg3_phy_auxctl_write(tp,
1949                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1950
1951 done:
1952         return err;
1953 }
1954
1955 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1956 {
1957         u32 phytest;
1958
1959         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1960                 u32 phy;
1961
1962                 tg3_writephy(tp, MII_TG3_FET_TEST,
1963                              phytest | MII_TG3_FET_SHADOW_EN);
1964                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1965                         if (enable)
1966                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1967                         else
1968                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1969                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1970                 }
1971                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1972         }
1973 }
1974
/* Enable or disable the PHY's Auto Power-Down (APD) feature through
 * the MISC shadow registers.  FET-style PHYs use a different register
 * layout and are delegated to tg3_phy_fet_toggle_apd().
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Not available before the 5705 generation; 5717-class parts
	 * with an MII serdes PHY are excluded too.
	 */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First write: SCR5 power-saving controls.  DLLAPD is kept on
	 * except when enabling APD on a 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second write: APD enable bit plus the 84ms wake-up timer. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2009
2010 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2011 {
2012         u32 phy;
2013
2014         if (!tg3_flag(tp, 5705_PLUS) ||
2015             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2016                 return;
2017
2018         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2019                 u32 ephy;
2020
2021                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2022                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2023
2024                         tg3_writephy(tp, MII_TG3_FET_TEST,
2025                                      ephy | MII_TG3_FET_SHADOW_EN);
2026                         if (!tg3_readphy(tp, reg, &phy)) {
2027                                 if (enable)
2028                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2029                                 else
2030                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2031                                 tg3_writephy(tp, reg, phy);
2032                         }
2033                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2034                 }
2035         } else {
2036                 int ret;
2037
2038                 ret = tg3_phy_auxctl_read(tp,
2039                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2040                 if (!ret) {
2041                         if (enable)
2042                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2043                         else
2044                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2045                         tg3_phy_auxctl_write(tp,
2046                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2047                 }
2048         }
2049 }
2050
2051 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2052 {
2053         int ret;
2054         u32 val;
2055
2056         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2057                 return;
2058
2059         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2060         if (!ret)
2061                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2062                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2063 }
2064
/* Copy factory calibration fields from the chip's OTP (one-time
 * programmable) word, cached in tp->phy_otp, into the PHY DSP
 * coefficient registers.  No-op when no OTP data is present or the
 * DSP access window cannot be opened.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Non-zero return means SMDSP access could not be enabled. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offsets */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2101
/* Re-evaluate Energy Efficient Ethernet state after a link change.
 * Sets tp->setlpicnt when the negotiated link supports EEE (the link
 * partner resolved to 100TX or 1000T EEE); otherwise disables LPI in
 * the CPMU.  No-op when the PHY is not EEE-capable.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE only applies to autonegotiated full-duplex 100/1000
	 * links.
	 */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status to see what
		 * the link partner agreed to.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* No EEE on this link: clear the DSP TAP26 word (when
		 * the link is up and DSP access works) and turn LPI off.
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2144
2145 static void tg3_phy_eee_enable(struct tg3 *tp)
2146 {
2147         u32 val;
2148
2149         if (tp->link_config.active_speed == SPEED_1000 &&
2150             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2151              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2152              tg3_flag(tp, 57765_CLASS)) &&
2153             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2154                 val = MII_TG3_DSP_TAP26_ALNOKO |
2155                       MII_TG3_DSP_TAP26_RMRXSTO;
2156                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2157                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2158         }
2159
2160         val = tr32(TG3_CPMU_EEE_MODE);
2161         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2162 }
2163
2164 static int tg3_wait_macro_done(struct tg3 *tp)
2165 {
2166         int limit = 100;
2167
2168         while (limit--) {
2169                 u32 tmp32;
2170
2171                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2172                         if ((tmp32 & 0x1000) == 0)
2173                                 break;
2174                 }
2175         }
2176         if (limit < 0)
2177                 return -EBUSY;
2178
2179         return 0;
2180 }
2181
/* Write a known test pattern into the DSP channel memory of each of
 * the four PHY channels, then read it back and verify.
 *
 * Returns 0 when every channel verifies clean.  On a DSP macro
 * timeout, *resetp is set to 1 (caller should reset the PHY and retry)
 * and -EBUSY is returned.  On a data mismatch, fixup values are
 * written to the DSP and -EBUSY is returned without setting *resetp.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six-word test pattern per channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's test memory and write the
		 * six-word pattern through the RW port.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the writes and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern two words (low/high) at a time
		 * and compare against what was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write DSP fixup values and
				 * report failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2247
2248 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2249 {
2250         int chan;
2251
2252         for (chan = 0; chan < 4; chan++) {
2253                 int i;
2254
2255                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2256                              (chan * 0x2000) | 0x0200);
2257                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2258                 for (i = 0; i < 6; i++)
2259                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2260                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2261                 if (tg3_wait_macro_done(tp))
2262                         return -EBUSY;
2263         }
2264
2265         return 0;
2266 }
2267
2268 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2269 {
2270         u32 reg32, phy9_orig;
2271         int retries, do_phy_reset, err;
2272
2273         retries = 10;
2274         do_phy_reset = 1;
2275         do {
2276                 if (do_phy_reset) {
2277                         err = tg3_bmcr_reset(tp);
2278                         if (err)
2279                                 return err;
2280                         do_phy_reset = 0;
2281                 }
2282
2283                 /* Disable transmitter and interrupt.  */
2284                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2285                         continue;
2286
2287                 reg32 |= 0x3000;
2288                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2289
2290                 /* Set full-duplex, 1000 mbps.  */
2291                 tg3_writephy(tp, MII_BMCR,
2292                              BMCR_FULLDPLX | BMCR_SPEED1000);
2293
2294                 /* Set to master mode.  */
2295                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2296                         continue;
2297
2298                 tg3_writephy(tp, MII_CTRL1000,
2299                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2300
2301                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2302                 if (err)
2303                         return err;
2304
2305                 /* Block the PHY control access.  */
2306                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2307
2308                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2309                 if (!err)
2310                         break;
2311         } while (--retries);
2312
2313         err = tg3_phy_reset_chanpat(tp);
2314         if (err)
2315                 return err;
2316
2317         tg3_phydsp_write(tp, 0x8005, 0x0000);
2318
2319         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2320         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2321
2322         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2323
2324         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2325
2326         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2327                 reg32 &= ~0x3000;
2328                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2329         } else if (!err)
2330                 err = -EBUSY;
2331
2332         return err;
2333 }
2334
/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
 * workarounds and settings that a reset wipes out.  (Historical note:
 * an earlier version took a FORCE argument; this one always resets.)
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the 5906's embedded PHY out of IDDQ power-down. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the first read returns the latched-low link
	 * status; the second reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report link-down before touching the PHY. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern workaround. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily lift the 10Mb RX-only restriction around
		 * the reset; restored just below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Clear the 12.5MHz gigabit MAC clock selection. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* Remaining fixups do not apply to 5717+ MII serdes PHYs. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* PHY-erratum fixups, reapplied after every reset. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2475
/* Per-PCI-function GPIO power-source handshake messages.  Each of the
 * (up to) four functions owns a 4-bit field in a shared status word
 * (see tg3_set_function_status()); the *_ALL_*_MASK macros cover the
 * same message bit across all four fields.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
/* TG3_GPIO_MSG_DRVR_PRES replicated into every function's field. */
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

/* TG3_GPIO_MSG_NEED_VAUX replicated into every function's field. */
#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2491
/* Replace this PCI function's 4-bit GPIO message field with @newstat.
 *
 * On 5717/5719 the shared status word lives in the APE scratchpad
 * register TG3_APE_GPIO_MSG; other chips keep it in
 * TG3_CPMU_DRV_STATUS.  Returns the full merged status word shifted
 * down so function 0's field starts at bit 0.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	/* Fetch the shared word from wherever this chip keeps it. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Each function owns a 4-bit field. */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	/* Write the merged word back to the same location it came from. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2514
/* Switch the board's power source to Vmain (main PCI power).
 *
 * No-op for non-NIC (e.g. LOM) configurations.  On 5717/5719/5720 the
 * GPIO pins are shared between functions, so the switch is done under
 * the APE GPIO lock and the driver announces its presence through the
 * shared function-status word first.  Returns 0 on success, -EIO if
 * the APE GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		/* Mark this function as present before touching GPIOs. */
		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2539
/* Leave the board powered from Vmain on the way down.
 *
 * Pulses GPIO1 (high, low, high) with GRC_LCLCTRL_GPIO_OE1 driven, each
 * step separated by TG3_GRC_LCLCTL_PWRSW_DELAY.  Skipped entirely for
 * non-NIC configurations and for 5700/5701, whose GPIOs differ.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2563
/* Switch the board's power source to Vaux (auxiliary power).
 *
 * The exact GPIO sequence is chip-specific; each branch below drives
 * GRC_LOCAL_CTRL in several timed steps (TG3_GRC_LCLCTL_PWRSW_DELAY
 * between writes).  The step ordering is part of the hardware protocol
 * and must not be rearranged.  No-op for non-NIC configurations.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write with GPIO0/1/2 enabled and
		 * GPIO0/1 driven high.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 after the enables have settled. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO2 where it exists. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2640
/* 5717-class variant of tg3_frob_aux_power().
 *
 * @wol_enable: count Wake-on-LAN as a reason to need Vaux.
 *
 * Publishes this function's power needs through the shared status word
 * (under the APE GPIO lock) and only acts on the GPIOs if no other
 * function's driver is present.  If any function still needs Vaux the
 * board stays on Vaux; otherwise it dies with Vmain.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is still present; it owns the GPIOs. */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2665
/* Arbitrate between Vmain and Vaux as the board's power source.
 *
 * @include_wol: when true, WOL_ENABLE also counts as a reason to stay
 *		 on Vaux.
 *
 * On 5717/5719/5720 this is delegated to the APE-arbitrated handshake
 * in tg3_frob_aux_power_5717().  On dual-port boards the peer device's
 * needs are considered too; if the peer's driver is fully initialized
 * (INIT_COMPLETE) the peer manages the power source and we do nothing.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer's driver is up; let it own the GPIOs. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2709
2710 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2711 {
2712         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2713                 return 1;
2714         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2715                 if (speed != SPEED_10)
2716                         return 1;
2717         } else if (speed == SPEED_10)
2718                 return 1;
2719
2720         return 0;
2721 }
2722
/* Power down the PHY (or SerDes) ahead of a low-power transition.
 *
 * @do_low_power: additionally program the AUXCTL power-control shadow
 *		  register for the deepest PHY low-power state.
 *
 * Several chip families need special handling, and on a few buggy
 * chips the final BMCR_PDOWN write must be skipped entirely.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 SerDes: hand the link to hardware autoneg and
		 * reset it before powering down.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: put the embedded PHY into IDDQ via GRC_MISC_CFG. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow-register access, set the
			 * standby-power-down bit, then restore.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	/* 5784_AX/5761_AX: drop the 1000Mb MAC clock to 12.5MHz first. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2795
2796 /* tp->lock is held. */
/* Acquire the hardware NVRAM arbitration semaphore (SWARB).
 *
 * The lock is recursive in software: nvram_lock_cnt counts nested
 * holders and only the outermost acquisition touches the hardware.
 * Polls up to 8000 * 20us for the grant; on timeout the request is
 * withdrawn and -ENODEV returned.  Devices without the NVRAM flag
 * succeed trivially.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out: withdraw our request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2818
2819 /* tp->lock is held. */
2820 static void tg3_nvram_unlock(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, NVRAM)) {
2823                 if (tp->nvram_lock_cnt > 0)
2824                         tp->nvram_lock_cnt--;
2825                 if (tp->nvram_lock_cnt == 0)
2826                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2827         }
2828 }
2829
2830 /* tp->lock is held. */
2831 static void tg3_enable_nvram_access(struct tg3 *tp)
2832 {
2833         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2834                 u32 nvaccess = tr32(NVRAM_ACCESS);
2835
2836                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2837         }
2838 }
2839
2840 /* tp->lock is held. */
2841 static void tg3_disable_nvram_access(struct tg3 *tp)
2842 {
2843         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2844                 u32 nvaccess = tr32(NVRAM_ACCESS);
2845
2846                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2847         }
2848 }
2849
/* Read one 32-bit word at @offset through the legacy serial-EEPROM
 * interface (used when the device has no NVRAM flag).
 *
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK;
 * returns -EINVAL otherwise, -EBUSY if the controller never signals
 * completion, 0 on success with the word stored in *@val.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id and the
	 * read flag before starting a new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll (up to ~1s) for transaction completion. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2889
/* Maximum poll iterations (10us each) for an NVRAM command. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for NVRAM_CMD_DONE.
 * Returns 0 on completion, -EBUSY if the command never finishes within
 * NVRAM_CMD_TIMEOUT * 10us.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra settle time after DONE is observed. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2910
2911 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2912 {
2913         if (tg3_flag(tp, NVRAM) &&
2914             tg3_flag(tp, NVRAM_BUFFERED) &&
2915             tg3_flag(tp, FLASH) &&
2916             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2917             (tp->nvram_jedecnum == JEDEC_ATMEL))
2918
2919                 addr = ((addr / tp->nvram_pagesize) <<
2920                         ATMEL_AT45DB0X1B_PAGE_POS) +
2921                        (addr % tp->nvram_pagesize);
2922
2923         return addr;
2924 }
2925
2926 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2927 {
2928         if (tg3_flag(tp, NVRAM) &&
2929             tg3_flag(tp, NVRAM_BUFFERED) &&
2930             tg3_flag(tp, FLASH) &&
2931             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932             (tp->nvram_jedecnum == JEDEC_ATMEL))
2933
2934                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2935                         tp->nvram_pagesize) +
2936                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2937
2938         return addr;
2939 }
2940
2941 /* NOTE: Data read in from NVRAM is byteswapped according to
2942  * the byteswapping settings for all other register accesses.
2943  * tg3 devices are BE devices, so on a BE machine, the data
2944  * returned will be exactly as it is seen in NVRAM.  On a LE
2945  * machine, the 32-bit value will be byteswapped.
2946  */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Devices without real NVRAM use the serial-EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate for page-addressed Atmel parts. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Hold the hardware arbitration semaphore across the access. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2978
2979 /* Ensures NVRAM data is in bytestream format. */
2980 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2981 {
2982         u32 v;
2983         int res = tg3_nvram_read(tp, offset, &v);
2984         if (!res)
2985                 *val = cpu_to_be32(v);
2986         return res;
2987 }
2988
/* Write @len bytes from @buf at @offset through the legacy serial
 * EEPROM interface, one dword per transaction.  @offset and @len are
 * dword aligned (caller's contract).  Returns 0 on success or -EBUSY
 * if a transaction never completes; earlier dwords stay written.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll (up to ~1s) for this dword to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3037
3038 /* offset and length are dword aligned */
3039 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3040                 u8 *buf)
3041 {
3042         int ret = 0;
3043         u32 pagesize = tp->nvram_pagesize;
3044         u32 pagemask = pagesize - 1;
3045         u32 nvram_cmd;
3046         u8 *tmp;
3047
3048         tmp = kmalloc(pagesize, GFP_KERNEL);
3049         if (tmp == NULL)
3050                 return -ENOMEM;
3051
3052         while (len) {
3053                 int j;
3054                 u32 phy_addr, page_off, size;
3055
3056                 phy_addr = offset & ~pagemask;
3057
3058                 for (j = 0; j < pagesize; j += 4) {
3059                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3060                                                   (__be32 *) (tmp + j));
3061                         if (ret)
3062                                 break;
3063                 }
3064                 if (ret)
3065                         break;
3066
3067                 page_off = offset & pagemask;
3068                 size = pagesize;
3069                 if (len < size)
3070                         size = len;
3071
3072                 len -= size;
3073
3074                 memcpy(tmp + page_off, buf, size);
3075
3076                 offset = offset + (pagesize - page_off);
3077
3078                 tg3_enable_nvram_access(tp);
3079
3080                 /*
3081                  * Before we can erase the flash page, we need
3082                  * to issue a special "write enable" command.
3083                  */
3084                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3085
3086                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3087                         break;
3088
3089                 /* Erase the target page */
3090                 tw32(NVRAM_ADDR, phy_addr);
3091
3092                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3093                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3094
3095                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3096                         break;
3097
3098                 /* Issue another write enable to start the write. */
3099                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3100
3101                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3102                         break;
3103
3104                 for (j = 0; j < pagesize; j += 4) {
3105                         __be32 data;
3106
3107                         data = *((__be32 *) (tmp + j));
3108
3109                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3110
3111                         tw32(NVRAM_ADDR, phy_addr + j);
3112
3113                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3114                                 NVRAM_CMD_WR;
3115
3116                         if (j == 0)
3117                                 nvram_cmd |= NVRAM_CMD_FIRST;
3118                         else if (j == (pagesize - 4))
3119                                 nvram_cmd |= NVRAM_CMD_LAST;
3120
3121                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3122                         if (ret)
3123                                 break;
3124                 }
3125                 if (ret)
3126                         break;
3127         }
3128
3129         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3130         tg3_nvram_exec_cmd(tp, nvram_cmd);
3131
3132         kfree(tmp);
3133
3134         return ret;
3135 }
3136
/* offset and length are dword aligned */
/* Write @len bytes from @buf at @offset to buffered flash or EEPROM,
 * one dword per command.  NVRAM_CMD_FIRST/LAST frame each flash page
 * (and the whole transfer); ST parts on older chips additionally need
 * an explicit write-enable before each page.  Returns 0 or the first
 * command's error code.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on a page boundary or the very first dword;
		 * LAST on the final dword of a page or of the buffer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Most parts latch the address once per page; others
		 * need it per dword.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST flash on pre-5752, non-5755+ chips needs WREN
		 * before each page write.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3191
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: lifts the GPIO-based write
 * protect, takes the arbitration lock, enables write mode, dispatches
 * to the buffered or unbuffered path, then restores everything in
 * reverse order.  Returns the inner path's status.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop GPIO1 to release the external write-protect, if wired. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO state. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3241
/* On-chip scratch memory windows used when loading RX/TX CPU firmware
 * (see tg3_load_firmware_cpu()).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3246
/* tp->lock is held. */
/* Halt the on-chip CPU at @offset (RX_CPU_BASE or TX_CPU_BASE).
 *
 * 5906 halts its VCPU through GRC_VCPU_EXT_CTRL instead.  Other chips
 * are polled for up to 10000 attempts; -ENODEV on timeout.  On
 * success the firmware's NVRAM arbitration request is also cleared,
 * since halted bootcode can no longer release it.  5705+ parts have
 * no TX CPU, hence the BUG_ON.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU: force one final halt and let it settle. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3291
/* Describes one firmware image destined for an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;	/* load address; only the low 16 bits are
				 * used as the scratch-memory offset (see
				 * tg3_load_firmware_cpu()) */
	unsigned int fw_len;	/* length of fw_data in bytes */
	const __be32 *fw_data;	/* firmware text as big-endian words */
};
3297
/* tp->lock is held. */
/* Copy @info's firmware image into @cpu_base's scratch memory.
 *
 * Zeroes @cpu_scratch_size bytes at @cpu_scratch_base, halts the CPU,
 * then writes the image word by word (converting from big-endian).
 * Rejects TX-CPU loads on 5705+ parts, which have no TX CPU.  The CPU
 * is left halted; the caller is responsible for releasing it.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ can write scratch memory directly; older chips go
	 * through the indirect register window.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear the scratch area, re-assert halt, then copy the image. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3343
3344 /* tp->lock is held. */
3345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3346 {
3347         struct fw_info info;
3348         const __be32 *fw_data;
3349         int err, i;
3350
3351         fw_data = (void *)tp->fw->data;
3352
3353         /* Firmware blob starts with version numbers, followed by
3354            start address and length. We are setting complete length.
3355            length = end_address_of_bss - start_address_of_text.
3356            Remainder is the blob to be loaded contiguously
3357            from start address. */
3358
3359         info.fw_base = be32_to_cpu(fw_data[1]);
3360         info.fw_len = tp->fw->size - 12;
3361         info.fw_data = &fw_data[3];
3362
3363         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3364                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3365                                     &info);
3366         if (err)
3367                 return err;
3368
3369         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3370                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3371                                     &info);
3372         if (err)
3373                 return err;
3374
3375         /* Now startup only the RX cpu. */
3376         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3377         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3378
3379         for (i = 0; i < 5; i++) {
3380                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3381                         break;
3382                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3384                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385                 udelay(1000);
3386         }
3387         if (i >= 5) {
3388                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3389                            "should be %08x\n", __func__,
3390                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3391                 return -ENODEV;
3392         }
3393         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3394         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3395
3396         return 0;
3397 }
3398
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Chips with hardware TSO never need the TSO firmware. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	/* On the 5705 the firmware runs on the RX cpu with the mbuf
	 * pool as scratch space (scratch size stays tp->fw_len from
	 * above); other chips use the TX cpu and its dedicated
	 * scratch area.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Poll until the cpu's PC sticks at the firmware entry point,
	 * re-halting and re-seeding it on each failed attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}

	/* Release the cpu from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3462
3463
3464 /* tp->lock is held. */
3465 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3466 {
3467         u32 addr_high, addr_low;
3468         int i;
3469
3470         addr_high = ((tp->dev->dev_addr[0] << 8) |
3471                      tp->dev->dev_addr[1]);
3472         addr_low = ((tp->dev->dev_addr[2] << 24) |
3473                     (tp->dev->dev_addr[3] << 16) |
3474                     (tp->dev->dev_addr[4] <<  8) |
3475                     (tp->dev->dev_addr[5] <<  0));
3476         for (i = 0; i < 4; i++) {
3477                 if (i == 1 && skip_mac_1)
3478                         continue;
3479                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3480                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3481         }
3482
3483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3485                 for (i = 0; i < 12; i++) {
3486                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3487                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3488                 }
3489         }
3490
3491         addr_high = (tp->dev->dev_addr[0] +
3492                      tp->dev->dev_addr[1] +
3493                      tp->dev->dev_addr[2] +
3494                      tp->dev->dev_addr[3] +
3495                      tp->dev->dev_addr[4] +
3496                      tp->dev->dev_addr[5]) &
3497                 TX_BACKOFF_SEED_MASK;
3498         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3499 }
3500
/* Rewrite the cached MISC_HOST_CTRL value into PCI config space; called
 * around power-state transitions before touching any device registers.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3510
3511 static int tg3_power_up(struct tg3 *tp)
3512 {
3513         int err;
3514
3515         tg3_enable_register_access(tp);
3516
3517         err = pci_set_power_state(tp->pdev, PCI_D0);
3518         if (!err) {
3519                 /* Switch out of Vaux if it is a NIC */
3520                 tg3_pwrsrc_switch_to_vmain(tp);
3521         } else {
3522                 netdev_err(tp->dev, "Transition to D0 failed\n");
3523         }
3524
3525         return err;
3526 }
3527
3528 static int tg3_setup_phy(struct tg3 *, int);
3529
/* Quiesce the chip before a power-state change: park the PHY at low
 * speed, arm Wake-on-LAN state if requested, and gate unused clocks.
 * Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being quiesced. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Waking requires both user intent (device_may_wakeup) and the
	 * driver's WoL enable flag.
	 */
	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link settings so power-up can
			 * restore them.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Advertise only the low speeds needed while
			 * suspended; add 100Mb/full-10Mb only for ASF or
			 * wake-up.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY OUIs still need the legacy
			 * low-power sequence below even under phylib.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Without ASF, wait up to ~200ms for the bootcode
		 * firmware mailbox to signal readiness.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		/* Leave the MAC receiving so magic packets can arrive. */
		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating for power savings; which bits apply depends on
	 * chip family.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two waited steps. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* With neither wake-up nor ASF needing the PHY, power it down. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX cpu under the nvram lock so in-flight
			 * bootcode cannot be interrupted mid-access.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3767
/* Fully power down the device: quiesce the chip, arm PME if WoL is
 * enabled, then enter PCI D3hot.  Ordering matters: registers are only
 * accessible before the D-state change.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3775
3776 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3777 {
3778         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3779         case MII_TG3_AUX_STAT_10HALF:
3780                 *speed = SPEED_10;
3781                 *duplex = DUPLEX_HALF;
3782                 break;
3783
3784         case MII_TG3_AUX_STAT_10FULL:
3785                 *speed = SPEED_10;
3786                 *duplex = DUPLEX_FULL;
3787                 break;
3788
3789         case MII_TG3_AUX_STAT_100HALF:
3790                 *speed = SPEED_100;
3791                 *duplex = DUPLEX_HALF;
3792                 break;
3793
3794         case MII_TG3_AUX_STAT_100FULL:
3795                 *speed = SPEED_100;
3796                 *duplex = DUPLEX_FULL;
3797                 break;
3798
3799         case MII_TG3_AUX_STAT_1000HALF:
3800                 *speed = SPEED_1000;
3801                 *duplex = DUPLEX_HALF;
3802                 break;
3803
3804         case MII_TG3_AUX_STAT_1000FULL:
3805                 *speed = SPEED_1000;
3806                 *duplex = DUPLEX_FULL;
3807                 break;
3808
3809         default:
3810                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3811                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3812                                  SPEED_10;
3813                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3814                                   DUPLEX_HALF;
3815                         break;
3816                 }
3817                 *speed = SPEED_UNKNOWN;
3818                 *duplex = DUPLEX_UNKNOWN;
3819                 break;
3820         }
3821 }
3822
3823 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3824 {
3825         int err = 0;
3826         u32 val, new_adv;
3827
3828         new_adv = ADVERTISE_CSMA;
3829         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3830         new_adv |= mii_advertise_flowctrl(flowctrl);
3831
3832         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3833         if (err)
3834                 goto done;
3835
3836         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3837                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3838
3839                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3840                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3841                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3842
3843                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3844                 if (err)
3845                         goto done;
3846         }
3847
3848         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3849                 goto done;
3850
3851         tw32(TG3_CPMU_EEE_MODE,
3852              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3853
3854         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3855         if (!err) {
3856                 u32 err2;
3857
3858                 val = 0;
3859                 /* Advertise 100-BaseTX EEE ability */
3860                 if (advertise & ADVERTISED_100baseT_Full)
3861                         val |= MDIO_AN_EEE_ADV_100TX;
3862                 /* Advertise 1000-BaseT EEE ability */
3863                 if (advertise & ADVERTISED_1000baseT_Full)
3864                         val |= MDIO_AN_EEE_ADV_1000T;
3865                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3866                 if (err)
3867                         val = 0;
3868
3869                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3870                 case ASIC_REV_5717:
3871                 case ASIC_REV_57765:
3872                 case ASIC_REV_57766:
3873                 case ASIC_REV_5719:
3874                         /* If we advertised any eee advertisements above... */
3875                         if (val)
3876                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3877                                       MII_TG3_DSP_TAP26_RMRXSTO |
3878                                       MII_TG3_DSP_TAP26_OPCSINPT;
3879                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3880                         /* Fall through */
3881                 case ASIC_REV_5720:
3882                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3883                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3884                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3885                 }
3886
3887                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3888                 if (!err)
3889                         err = err2;
3890         }
3891
3892 done:
3893         return err;
3894 }
3895
/* Begin copper PHY link bring-up: either (re)start autonegotiation or
 * force the configured speed/duplex.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	/* The autoneg path is also taken in low-power mode, where a
	 * restricted advertisement set overrides user configuration.
	 */
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low power: 10Mb only (plus 100Mb if WoL needs
			 * it) with full flow control.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* Forced speed/duplex path. */
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only touch the PHY if the requested mode differs from
		 * what is currently programmed.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to
			 * ~15ms) for link-down before reprogramming.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches link-down; read twice to
				 * get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
3968
3969 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3970 {
3971         int err;
3972
3973         /* Turn off tap power management. */
3974         /* Set Extended packet length bit */
3975         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3976
3977         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3978         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3979         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3980         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3981         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3982
3983         udelay(40);
3984
3985         return err;
3986 }
3987
/* Check whether the PHY's current advertisement registers match the
 * driver's requested link configuration.  On return *lcladv holds the
 * raw MII_ADVERTISE value read from the PHY.  Returns false on any
 * mismatch or PHY read failure.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause advertisement only matters on full duplex. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		/* 5701 A0/B0 advertise forced-master (set in
		 * tg3_phy_autoneg_cfg()), so expect those bits as well.
		 */
		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4031
4032 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4033 {
4034         u32 lpeth = 0;
4035
4036         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4037                 u32 val;
4038
4039                 if (tg3_readphy(tp, MII_STAT1000, &val))
4040                         return false;
4041
4042                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4043         }
4044
4045         if (tg3_readphy(tp, MII_LPA, rmtadv))
4046                 return false;
4047
4048         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4049         tp->link_config.rmt_adv = lpeth;
4050
4051         return true;
4052 }
4053
/* Bring up / re-evaluate the link on a copper-PHY device and program the
 * MAC to match the negotiated speed, duplex, and flow control.  Called
 * with @force_reset non-zero when the caller wants an unconditional PHY
 * reset first.  Returns 0 on success or a negative error code from the
 * PHY helpers.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Mask MAC events and ack any latched link/config-change status. */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Pause MI auto-polling so our direct MDIO accesses below do not
         * race with the hardware poller.
         */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched; read twice so the second
                 * read reflects the current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        /* BCM5401: reload the DSP patch whenever link is down. */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        /* Wait up to ~10ms for the link to come back. */
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 B0 at gigabit: if link is still down, reset
                         * the PHY and apply the DSP patch once more.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        /* Unmask only the link-change interrupt when MI interrupts are in
         * use; otherwise mask everything (non-FET PHYs only).
         */
        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        /* 5700/5701: program the LED mode into the PHY. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        /* Assume link down until proven otherwise. */
        current_link_up = 0;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                /* Set bit 10 if it is not already set, then go straight
                 * to renegotiation.
                 */
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll up to ~4ms for link up (BMSR read twice; latched bit). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait up to ~20ms for a non-zero aux status, then decode
                 * it into speed/duplex.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Re-read BMCR until we get a stable value; 0x0000 and
                 * 0x7fff are treated as bad reads.
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Autoneg mode: link is good only if the PHY has
                         * autoneg enabled, the advertisement matches what
                         * we configured, and the partner ability registers
                         * read cleanly.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        /* Forced mode: the PHY state must match the forced
                         * speed/duplex/flow-control settings exactly.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        /* Record MDI-X status; the register and bit differ
                         * for FET-style PHYs.
                         */
                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                /* Kick off (re)negotiation; internal loopback counts as
                 * link up.
                 */
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Program the MAC port mode to match the negotiated speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        /* 5700 link-polarity fixup depends on the active speed. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X/high-speed PCI: ack status and tell
         * the firmware via its mailbox.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* CLKREQ must be disabled at 10/100, enabled otherwise;
                 * only rewrite the link control register on change.
                 */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) +
                                              PCI_EXP_LNKCTL, newlnkctl);
        }

        /* Propagate any carrier change to the stack and log it. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
4345
/* Per-port software state for the fiber autonegotiation state machine
 * implemented by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        int state;                /* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;                /* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* MR_LP_ADV_*: link partner abilities decoded from its config word */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps measured in state-machine invocations (ticks). */
        unsigned long link_time, cur_time;

        /* Last sampled RX config word and how many consecutive ticks
         * it has been seen unchanged.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;   /* config words we send / last received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must hold before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4409
/* Advance the software fiber autonegotiation state machine by one tick.
 * fiber_autoneg() calls this in a tight ~1us polling loop.  Returns
 * ANEG_OK to keep going, ANEG_TIMER_ENAB while a timed settle is in
 * progress, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: zero all match/timing state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;        /* cur_time counts invocations (ticks) */

        /* Sample the received config word and update the ability/ack/idle
         * trackers.  "Ability match" means the same config word was seen
         * on more than one consecutive tick.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word received: the partner is sending idles. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Restart negotiation from scratch. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word, then let things settle. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold here until the settle time elapses. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait for a stable non-zero config word from the partner. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked; proceed only if its config word is
                         * unchanged (ignoring the ACK bit itself).
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner restarted negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Fail on config words with invalid bits set. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner's config word into MR_LP_ADV_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)        /* partner's toggle bit */
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented here;
                                 * succeed only when neither side wants one.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idles. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4661
4662 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4663 {
4664         int res = 0;
4665         struct tg3_fiber_aneginfo aninfo;
4666         int status = ANEG_FAILED;
4667         unsigned int tick;
4668         u32 tmp;
4669
4670         tw32_f(MAC_TX_AUTO_NEG, 0);
4671
4672         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4673         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4674         udelay(40);
4675
4676         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4677         udelay(40);
4678
4679         memset(&aninfo, 0, sizeof(aninfo));
4680         aninfo.flags |= MR_AN_ENABLE;
4681         aninfo.state = ANEG_STATE_UNKNOWN;
4682         aninfo.cur_time = 0;
4683         tick = 0;
4684         while (++tick < 195000) {
4685                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4686                 if (status == ANEG_DONE || status == ANEG_FAILED)
4687                         break;
4688
4689                 udelay(1);
4690         }
4691
4692         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4693         tw32_f(MAC_MODE, tp->mac_mode);
4694         udelay(40);
4695
4696         *txflags = aninfo.txconfig;
4697         *rxflags = aninfo.flags;
4698
4699         if (status == ANEG_DONE &&
4700             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4701                              MR_LP_ADV_FULL_DUPLEX)))
4702                 res = 1;
4703
4704         return res;
4705 }
4706
/* Initialization sequence for the BCM8002 SerDes PHY.  The raw register
 * numbers and values below are opaque vendor magic; the write ordering
 * must be preserved.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~5ms. */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        /* Busy-wait ~150ms. */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
4756
4757 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4758 {
4759         u16 flowctrl;
4760         u32 sg_dig_ctrl, sg_dig_status;
4761         u32 serdes_cfg, expected_sg_dig_ctrl;
4762         int workaround, port_a;
4763         int current_link_up;
4764
4765         serdes_cfg = 0;
4766         expected_sg_dig_ctrl = 0;
4767         workaround = 0;
4768         port_a = 1;
4769         current_link_up = 0;
4770
4771         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4772             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4773                 workaround = 1;
4774                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4775                         port_a = 0;
4776
4777                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4778                 /* preserve bits 20-23 for voltage regulator */
4779                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4780         }
4781
4782         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4783
4784         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4785                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4786                         if (workaround) {
4787                                 u32 val = serdes_cfg;
4788
4789                                 if (port_a)
4790                                         val |= 0xc010000;
4791                                 else
4792                                         val |= 0x4010000;
4793                                 tw32_f(MAC_SERDES_CFG, val);
4794                         }
4795
4796                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4797                 }
4798                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4799                         tg3_setup_flow_control(tp, 0, 0);
4800                         current_link_up = 1;
4801                 }
4802                 goto out;
4803         }
4804
4805         /* Want auto-negotiation.  */
4806         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4807
4808         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4809         if (flowctrl & ADVERTISE_1000XPAUSE)
4810                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4811         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4812                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4813
4814         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4815                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4816                     tp->serdes_counter &&
4817                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4818                                     MAC_STATUS_RCVD_CFG)) ==
4819                      MAC_STATUS_PCS_SYNCED)) {
4820                         tp->serdes_counter--;
4821                         current_link_up = 1;
4822                         goto out;
4823                 }
4824 restart_autoneg:
4825                 if (workaround)
4826                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4827                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4828                 udelay(5);
4829                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4830
4831                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4832                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4833         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4834                                  MAC_STATUS_SIGNAL_DET)) {
4835                 sg_dig_status = tr32(SG_DIG_STATUS);
4836                 mac_status = tr32(MAC_STATUS);
4837
4838                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4839                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4840                         u32 local_adv = 0, remote_adv = 0;
4841
4842                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4843                                 local_adv |= ADVERTISE_1000XPAUSE;
4844                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4845                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4846
4847                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4848                                 remote_adv |= LPA_1000XPAUSE;
4849                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4850                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4851
4852                         tp->link_config.rmt_adv =
4853                                            mii_adv_to_ethtool_adv_x(remote_adv);
4854
4855                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4856                         current_link_up = 1;
4857                         tp->serdes_counter = 0;
4858                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4859                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4860                         if (tp->serdes_counter)
4861                                 tp->serdes_counter--;
4862                         else {
4863                                 if (workaround) {
4864                                         u32 val = serdes_cfg;
4865
4866                                         if (port_a)
4867                                                 val |= 0xc010000;
4868                                         else
4869                                                 val |= 0x4010000;
4870
4871                                         tw32_f(MAC_SERDES_CFG, val);
4872                                 }
4873
4874                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4875                                 udelay(40);
4876
4877                                 /* Link parallel detection - link is up */
4878                                 /* only if we have PCS_SYNC and not */
4879                                 /* receiving config code words */
4880                                 mac_status = tr32(MAC_STATUS);
4881                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4882                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4883                                         tg3_setup_flow_control(tp, 0, 0);
4884                                         current_link_up = 1;
4885                                         tp->phy_flags |=
4886                                                 TG3_PHYFLG_PARALLEL_DETECT;
4887                                         tp->serdes_counter =
4888                                                 SERDES_PARALLEL_DET_TIMEOUT;
4889                                 } else
4890                                         goto restart_autoneg;
4891                         }
4892                 }
4893         } else {
4894                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4895                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896         }
4897
4898 out:
4899         return current_link_up;
4900 }
4901
/* Bring up a fiber (TBI) link using the software autoneg state machine
 * (fiber_autoneg) rather than the on-chip hardware autoneg engine.
 *
 * @tp:         device private state
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller
 *
 * Returns 1 if the link is considered up, 0 otherwise.  Requires PCS
 * sync before attempting anything.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all; report link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the raw autoneg config words into
			 * MII-style 1000BASE-X pause advertisement bits
			 * so the common flow-control resolver can be used.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until the status bits stay
		 * clear (bounded at 30 tries so we cannot spin forever).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete, but if we have PCS sync and the
		 * partner is not sending config code words, treat the link
		 * as up anyway (parallel-detect style fallback).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS so the partner sees our configuration,
		 * then restore the normal MAC mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4966
/* Top-level link setup for fiber (TBI) ports.
 *
 * Selects hardware or software autoneg, waits for the MAC status to
 * settle, programs the link LEDs, and updates the netdev carrier state,
 * reporting link changes to the log.
 *
 * @tp:          device private state
 * @force_reset: unused on this path (fiber has no MII PHY to reset)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report a
	 * change even when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: without HW autoneg, if the link is already up and
	 * stable (PCS sync + signal detect, no pending change events),
	 * there is nothing to renegotiate.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	/* Dispatch to the hardware autoneg engine or the software
	 * (by-hand) state machine, depending on chip capability.
	 */
	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the shared status block
	 * while keeping the rest of the status word intact.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending sync/config/link-state change events until they
	 * stay clear (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg idle but no sync: nudge the partner by briefly
		 * sending config code words.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only run 1000/full; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes to the stack; also report when the
	 * carrier is unchanged but speed/duplex/pause were renegotiated.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5075
/* Link setup for fiber ports attached through an MII-accessible SerDes
 * PHY (e.g. 5714S-class parts) rather than the raw TBI interface.
 *
 * Handles three cases: waiting out a parallel-detected link, (re)starting
 * MII autonegotiation, and forcing a fixed-duplex link.  Resolves duplex
 * and flow control from the advertisement registers and updates carrier
 * state.
 *
 * @tp:          device private state
 * @force_reset: reset the PHY before configuring it
 *
 * Returns 0 on success or an accumulated MII access error code.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any stale MAC status events before reconfiguring. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR latches link-down; read twice to get the current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: BMSR link bit is unreliable; trust the MAC's
		 * TX status link indication instead.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Build the desired 1000BASE-X advertisement from the
		 * configured flow control and link modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* Only (re)start autoneg if the advertisement changed or
		 * autoneg is not currently enabled; the result is checked
		 * on a later poll, so return early here.
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: disable autoneg and set the duplex. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the link-mode advertisement and
				 * restart autoneg so the partner drops
				 * the link before we force our mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read link status (twice, latched bit) after
			 * the forced-mode write.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* SerDes links are always gigabit; duplex comes from BMCR
		 * unless autoneg resolved a common mode below.
		 */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5247
/* Periodic SerDes parallel-detection handler (called from the driver's
 * timer path once the autoneg grace counter expires).
 *
 * If the link is down with autoneg enabled but the partner is not
 * sending config code words, force the link up (parallel detect).
 * Conversely, if a parallel-detected link starts receiving config code
 * words, re-enable autoneg so the modes can be negotiated properly.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read: first read clears latched status. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5307
/* Common entry point for (re)configuring the link.
 *
 * Dispatches to the fiber (TBI), fiber-MII (SerDes), or copper setup
 * routine based on the PHY flags, then applies link-dependent MAC
 * programming: the 5784_AX clock prescaler, TX inter-packet gap /
 * slot time, statistics coalescing, and the ASPM L1 threshold
 * workaround.
 *
 * @tp:          device private state
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * Returns the PHY setup routine's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Re-derive the GRC timer prescaler from the current MAC
		 * clock so timers keep ticking at the same rate.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half needs the longer slot time (0xff vs. 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Only collect stats while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the L1 entry threshold while the link
	 * is down, restore it once the link comes up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5372
/* Nonzero while interrupts are being synchronized/quiesced; poll and
 * hot paths check this to avoid touching hardware mid-transition.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5377
5378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5379 {
5380         int i;
5381
5382         dst = (u32 *)((u8 *)dst + off);
5383         for (i = 0; i < len; i += sizeof(u32))
5384                 *dst++ = tr32(off + i);
5385 }
5386
/* Populate @regs with the register blocks of a non-PCIe (legacy) chip.
 *
 * Each tg3_rd32_loop() call copies one functional block (mailboxes, MAC,
 * send/receive engines, DMA, CPUs, GRC, NVRAM) at its native offset in
 * the dump image; blocks that only exist on some chips are gated on
 * feature flags.  @regs must be TG3_REG_BLK_SIZE bytes.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* 5705+ chips have no separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5436
/* Dump device registers and per-vector status/NAPI state to the log.
 *
 * Used on error paths (e.g. TX timeout) for post-mortem debugging.
 * Allocates the register buffer with GFP_ATOMIC since this may run in
 * atomic context; on allocation failure it logs and returns without
 * dumping.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line; skip all-zero groups (the
	 * buffer was zero-initialized, so unread ranges stay silent).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump the software view of every interrupt vector: hardware
	 * status block contents and the driver's NAPI ring indices.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5494
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Should never fire if the reorder workaround is already active
	 * or mailbox writes already go through the indirect path.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the reset itself
	 * happens later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5516
5517 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5518 {
5519         /* Tell compiler to fetch tx indices from memory. */
5520         barrier();
5521         return tnapi->tx_pending -
5522                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5523 }
5524
/* Reclaim completed transmit descriptors for one napi context.
 *
 * Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's TX consumer index, read from the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the tx queue numbering is shifted down by one relative
	 * to the napi context index (NOTE(review): inferred from this
	 * decrement — confirm against the MSI-X vector setup).
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk the ring from our consumer up to the hardware's. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a completed slot means the ring bookkeeping
		 * is corrupt; schedule a full TX recovery and bail out.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra slots consumed when this mapping had to be
		 * split across multiple BDs (marked via ri->fragmented).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment, again skipping split-BD slots. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A populated skb slot here, or running past the
			 * hardware index, indicates ring corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte queue limits machinery. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough descriptors have
	 * freed up; re-check under the tx lock to close the race with
	 * tg3_start_xmit() stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5618
5619 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5620 {
5621         if (!ri->data)
5622                 return;
5623
5624         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5625                          map_sz, PCI_DMA_FROMDEVICE);
5626         kfree(ri->data);
5627         ri->data = NULL;
5628 }
5629
5630 /* Returns size of skb allocated or < 0 on error.
5631  *
5632  * We only need to fill in the address because the other members
5633  * of the RX descriptor are invariant, see tg3_init_rings.
5634  *
5635  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5636  * posting buffers we only dirty the first cache line of the RX
5637  * descriptor (containing the address).  Whereas for the RX status
5638  * buffers the cpu only reads the last cacheline of the RX descriptor
5639  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5640  */
5641 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5642                             u32 opaque_key, u32 dest_idx_unmasked)
5643 {
5644         struct tg3_rx_buffer_desc *desc;
5645         struct ring_info *map;
5646         u8 *data;
5647         dma_addr_t mapping;
5648         int skb_size, data_size, dest_idx;
5649
5650         switch (opaque_key) {
5651         case RXD_OPAQUE_RING_STD:
5652                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5653                 desc = &tpr->rx_std[dest_idx];
5654                 map = &tpr->rx_std_buffers[dest_idx];
5655                 data_size = tp->rx_pkt_map_sz;
5656                 break;
5657
5658         case RXD_OPAQUE_RING_JUMBO:
5659                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5660                 desc = &tpr->rx_jmb[dest_idx].std;
5661                 map = &tpr->rx_jmb_buffers[dest_idx];
5662                 data_size = TG3_RX_JMB_MAP_SZ;
5663                 break;
5664
5665         default:
5666                 return -EINVAL;
5667         }
5668
5669         /* Do not overwrite any of the map or rp information
5670          * until we are sure we can commit to a new buffer.
5671          *
5672          * Callers depend upon this behavior and assume that
5673          * we leave everything unchanged if we fail.
5674          */
5675         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5676                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5677         data = kmalloc(skb_size, GFP_ATOMIC);
5678         if (!data)
5679                 return -ENOMEM;
5680
5681         mapping = pci_map_single(tp->pdev,
5682                                  data + TG3_RX_OFFSET(tp),
5683                                  data_size,
5684                                  PCI_DMA_FROMDEVICE);
5685         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5686                 kfree(data);
5687                 return -EIO;
5688         }
5689
5690         map->data = data;
5691         dma_unmap_addr_set(map, mapping, mapping);
5692
5693         desc->addr_hi = ((u64)mapping >> 32);
5694         desc->addr_lo = ((u64)mapping & 0xffffffff);
5695
5696         return data_size;
5697 }
5698
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Reuses the buffer at @src_idx of the standard (napi context 0)
 * producer ring set for slot @dest_idx_unmasked in @dpr, instead of
 * allocating a fresh buffer.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
                           u32 opaque_key, int src_idx,
                           u32 dest_idx_unmasked)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        /* Source buffers always come from napi context 0's ring set. */
        struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
        int dest_idx;

        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                dest_desc = &dpr->rx_std[dest_idx];
                dest_map = &dpr->rx_std_buffers[dest_idx];
                src_desc = &spr->rx_std[src_idx];
                src_map = &spr->rx_std_buffers[src_idx];
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                dest_desc = &dpr->rx_jmb[dest_idx].std;
                dest_map = &dpr->rx_jmb_buffers[dest_idx];
                src_desc = &spr->rx_jmb[src_idx].std;
                src_map = &spr->rx_jmb_buffers[src_idx];
                break;

        default:
                /* Unknown ring type: nothing to recycle. */
                return;
        }

        /* Transfer ownership of the buffer pointer, its DMA handle, and
         * the bus address in the buffer descriptor.
         */
        dest_map->data = src_map->data;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;

        /* Ensure that the update to the skb happens after the physical
         * addresses have been transferred to the new BD location.
         */
        smp_wmb();

        src_map->data = NULL;
}
5748
5749 /* The RX ring scheme is composed of multiple rings which post fresh
5750  * buffers to the chip, and one special ring the chip uses to report
5751  * status back to the host.
5752  *
5753  * The special ring reports the status of received packets to the
5754  * host.  The chip does not write into the original descriptor the
5755  * RX buffer was obtained from.  The chip simply takes the original
5756  * descriptor as provided by the host, updates the status and length
5757  * field, then writes this into the next status ring entry.
5758  *
5759  * Each ring the host uses to post buffers to the chip is described
5760  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5761  * it is first placed into the on-chip ram.  When the packet's length
5762  * is known, it walks down the TG3_BDINFO entries to select the ring.
5763  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5764  * which is within the range of the new packet's length is chosen.
5765  *
5766  * The "separate ring for rx status" scheme may sound queer, but it makes
5767  * sense from a cache coherency perspective.  If only the host writes
5768  * to the buffer post rings, and only the chip writes to the rx status
5769  * rings, then cache lines never move beyond shared-modified state.
5770  * If both the host and chip were to write into the same ring, cache line
5771  * eviction could occur since both entities want it in an exclusive state.
5772  */
5773 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5774 {
5775         struct tg3 *tp = tnapi->tp;
5776         u32 work_mask, rx_std_posted = 0;
5777         u32 std_prod_idx, jmb_prod_idx;
5778         u32 sw_idx = tnapi->rx_rcb_ptr;
5779         u16 hw_idx;
5780         int received;
5781         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5782
5783         hw_idx = *(tnapi->rx_rcb_prod_idx);
5784         /*
5785          * We need to order the read of hw_idx and the read of
5786          * the opaque cookie.
5787          */
5788         rmb();
5789         work_mask = 0;
5790         received = 0;
5791         std_prod_idx = tpr->rx_std_prod_idx;
5792         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5793         while (sw_idx != hw_idx && budget > 0) {
5794                 struct ring_info *ri;
5795                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5796                 unsigned int len;
5797                 struct sk_buff *skb;
5798                 dma_addr_t dma_addr;
5799                 u32 opaque_key, desc_idx, *post_ptr;
5800                 u8 *data;
5801
5802                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5803                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5804                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5805                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5806                         dma_addr = dma_unmap_addr(ri, mapping);
5807                         data = ri->data;
5808                         post_ptr = &std_prod_idx;
5809                         rx_std_posted++;
5810                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5811                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5812                         dma_addr = dma_unmap_addr(ri, mapping);
5813                         data = ri->data;
5814                         post_ptr = &jmb_prod_idx;
5815                 } else
5816                         goto next_pkt_nopost;
5817
5818                 work_mask |= opaque_key;
5819
5820                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5821                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5822                 drop_it:
5823                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5824                                        desc_idx, *post_ptr);
5825                 drop_it_no_recycle:
5826                         /* Other statistics kept track of by card. */
5827                         tp->rx_dropped++;
5828                         goto next_pkt;
5829                 }
5830
5831                 prefetch(data + TG3_RX_OFFSET(tp));
5832                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5833                       ETH_FCS_LEN;
5834
5835                 if (len > TG3_RX_COPY_THRESH(tp)) {
5836                         int skb_size;
5837
5838                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5839                                                     *post_ptr);
5840                         if (skb_size < 0)
5841                                 goto drop_it;
5842
5843                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5844                                          PCI_DMA_FROMDEVICE);
5845
5846                         skb = build_skb(data);
5847                         if (!skb) {
5848                                 kfree(data);
5849                                 goto drop_it_no_recycle;
5850                         }
5851                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5852                         /* Ensure that the update to the data happens
5853                          * after the usage of the old DMA mapping.
5854                          */
5855                         smp_wmb();
5856
5857                         ri->data = NULL;
5858
5859